"""Methods for making imperative requests to language models with minimal abstraction.

These methods allow you to make requests to LLMs where the only abstraction is input and output schema
translation so you can use all models with the same API.

These methods are thin wrappers around [`Model`][pydantic_ai.models.Model] implementations.
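
For example (a minimal sketch of the synchronous helper; the model name is illustrative):

```py
from pydantic_ai import ModelRequest
from pydantic_ai.direct import model_request_sync

model_response = model_request_sync(
    'anthropic:claude-haiku-4-5',
    [ModelRequest.user_text_prompt('What is the capital of France?')],
)
print(model_response)
```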
"""

from __future__ import annotations as _annotations

import queue
import threading
from collections.abc import Iterator, Sequence
from contextlib import AbstractAsyncContextManager
from dataclasses import dataclass, field
from datetime import datetime
from types import TracebackType

from pydantic_ai.usage import RequestUsage
from pydantic_graph._utils import get_event_loop as _get_event_loop

from . import agent, messages, models, settings
from .models import StreamedResponse, instrumented as instrumented_models

__all__ = (
    'model_request',
    'model_request_sync',
    'model_request_stream',
    'model_request_stream_sync',
    'StreamedResponseSync',
)

STREAM_INITIALIZATION_TIMEOUT = 30
"""Maximum number of seconds to wait for the background stream to initialize."""


async def model_request(
    model: models.Model | models.KnownModelName | str,
    messages: Sequence[messages.ModelMessage],
    *,
    model_settings: settings.ModelSettings | None = None,
    model_request_parameters: models.ModelRequestParameters | None = None,
    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
) -> messages.ModelResponse:
    """Make a non-streamed request to a model.

    ```py title="model_request_example.py"
    from pydantic_ai import ModelRequest
    from pydantic_ai.direct import model_request


    async def main():
        model_response = await model_request(
            'anthropic:claude-haiku-4-5',
            [ModelRequest.user_text_prompt('What is the capital of France?')]  # (1)!
        )
        print(model_response)
        '''
        ModelResponse(
            parts=[TextPart(content='The capital of France is Paris.')],
            usage=RequestUsage(input_tokens=56, output_tokens=7),
            model_name='claude-haiku-4-5',
            timestamp=datetime.datetime(...),
        )
        '''
    ```

    1. See [`ModelRequest.user_text_prompt`][pydantic_ai.messages.ModelRequest.user_text_prompt] for details.

    Args:
        model: The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently.
        messages: Messages to send to the model
        model_settings: optional model settings
        model_request_parameters: optional model request parameters
        instrument: Whether to instrument the request with OpenTelemetry/Logfire, if `None` the value from
            [`logfire.instrument_pydantic_ai`][logfire.Logfire.instrument_pydantic_ai] is used.

    Returns:
        The model response and token usage associated with the request.
    N)_prepare_modelrequestlistr   ModelRequestParametersr   r   r   r   r   model_instance r+   X/var/www/html/karishye-ai-python/venv/lib/python3.10/site-packages/pydantic_ai/direct.pyr   $   s   
,

r   c             	   C  s   t  t| t||||dS )a2  Make a Synchronous, non-streamed request to a model.

    This is a convenience method that wraps [`model_request`][pydantic_ai.direct.model_request] with
    `loop.run_until_complete(...)`. You therefore can't use this method inside async code or if there's an active event loop.

    ```py title="model_request_sync_example.py"
    from pydantic_ai import ModelRequest
    from pydantic_ai.direct import model_request_sync

    model_response = model_request_sync(
        'anthropic:claude-haiku-4-5',
        [ModelRequest.user_text_prompt('What is the capital of France?')]  # (1)!
    )
    print(model_response)
    '''
    ModelResponse(
        parts=[TextPart(content='The capital of France is Paris.')],
        usage=RequestUsage(input_tokens=56, output_tokens=7),
        model_name='claude-haiku-4-5',
        timestamp=datetime.datetime(...),
    )
    '''
    ```

    1. See [`ModelRequest.user_text_prompt`][pydantic_ai.messages.ModelRequest.user_text_prompt] for details.

    Args:
        model: The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently.
        messages: Messages to send to the model
        model_settings: optional model settings
        model_request_parameters: optional model request parameters
        instrument: Whether to instrument the request with OpenTelemetry/Logfire, if `None` the value from
            [`logfire.instrument_pydantic_ai`][logfire.Logfire.instrument_pydantic_ai] is used.

    Returns:
        The model response and token usage associated with the request.
    """
    return _get_event_loop().run_until_complete(
        model_request(
            model,
            messages,
            model_settings=model_settings,
            model_request_parameters=model_request_parameters,
            instrument=instrument,
        )
    )


def model_request_stream(
    model: models.Model | models.KnownModelName | str,
    messages: Sequence[messages.ModelMessage],
    *,
    model_settings: settings.ModelSettings | None = None,
    model_request_parameters: models.ModelRequestParameters | None = None,
    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
) -> AbstractAsyncContextManager[models.StreamedResponse]:
    """Make a streamed async request to a model.

    ```py {title="model_request_stream_example.py"}

    from pydantic_ai import ModelRequest
    from pydantic_ai.direct import model_request_stream


    async def main():
        messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]  # (1)!
        async with model_request_stream('openai:gpt-4.1-mini', messages) as stream:
            chunks = []
            async for chunk in stream:
                chunks.append(chunk)
            print(chunks)
            '''
            [
                PartStartEvent(index=0, part=TextPart(content='Albert Einstein was ')),
                FinalResultEvent(tool_name=None, tool_call_id=None),
                PartDeltaEvent(
                    index=0, delta=TextPartDelta(content_delta='a German-born theoretical ')
                ),
                PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='physicist.')),
                PartEndEvent(
                    index=0,
                    part=TextPart(
                        content='Albert Einstein was a German-born theoretical physicist.'
                    ),
                ),
            ]
            '''
    ```

    1. See [`ModelRequest.user_text_prompt`][pydantic_ai.messages.ModelRequest.user_text_prompt] for details.

    Args:
        model: The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently.
        messages: Messages to send to the model
        model_settings: optional model settings
        model_request_parameters: optional model request parameters
        instrument: Whether to instrument the request with OpenTelemetry/Logfire, if `None` the value from
            [`logfire.instrument_pydantic_ai`][logfire.Logfire.instrument_pydantic_ai] is used.

    Returns:
        A [stream response][pydantic_ai.models.StreamedResponse] async context manager.
    """
    model_instance = _prepare_model(model, instrument)
    return model_instance.request_stream(
        list(messages),
        model_settings,
        model_request_parameters or models.ModelRequestParameters(),
    )


def model_request_stream_sync(
    model: models.Model | models.KnownModelName | str,
    messages: Sequence[messages.ModelMessage],
    *,
    model_settings: settings.ModelSettings | None = None,
    model_request_parameters: models.ModelRequestParameters | None = None,
    instrument: instrumented_models.InstrumentationSettings | bool | None = None,
) -> StreamedResponseSync:
    """Make a streamed synchronous request to a model.

    This is the synchronous version of [`model_request_stream`][pydantic_ai.direct.model_request_stream].
    It uses threading to run the asynchronous stream in the background while providing a synchronous iterator interface.

    ```py {title="model_request_stream_sync_example.py"}

    from pydantic_ai import ModelRequest
    from pydantic_ai.direct import model_request_stream_sync

    messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
    with model_request_stream_sync('openai:gpt-4.1-mini', messages) as stream:
        chunks = []
        for chunk in stream:
            chunks.append(chunk)
        print(chunks)
        '''
        [
            PartStartEvent(index=0, part=TextPart(content='Albert Einstein was ')),
            FinalResultEvent(tool_name=None, tool_call_id=None),
            PartDeltaEvent(
                index=0, delta=TextPartDelta(content_delta='a German-born theoretical ')
            ),
            PartDeltaEvent(index=0, delta=TextPartDelta(content_delta='physicist.')),
            PartEndEvent(
                index=0,
                part=TextPart(
                    content='Albert Einstein was a German-born theoretical physicist.'
                ),
            ),
        ]
        '''
    ```

    Args:
        model: The model to make a request to. We allow `str` here since the actual list of allowed models changes frequently.
        messages: Messages to send to the model
        model_settings: optional model settings
        model_request_parameters: optional model request parameters
        instrument: Whether to instrument the request with OpenTelemetry/Logfire, if `None` the value from
            [`logfire.instrument_pydantic_ai`][logfire.Logfire.instrument_pydantic_ai] is used.

    Returns:
        A [sync stream response][pydantic_ai.direct.StreamedResponseSync] context manager.
    """
    async_stream_cm = model_request_stream(
        model,
        messages,
        model_settings=model_settings,
        model_request_parameters=model_request_parameters,
        instrument=instrument,
    )
    return StreamedResponseSync(async_stream_cm)


def _prepare_model(
    model: models.Model | models.KnownModelName | str,
    instrument: instrumented_models.InstrumentationSettings | bool | None,
) -> models.Model:
    """Infer the model from a name or instance and wrap it with instrumentation if requested."""
    model_instance = models.infer_model(model)

    if instrument is None:
        # Fall back to the global default set via `logfire.instrument_pydantic_ai`.
        instrument = agent.Agent._instrument_default

    return instrumented_models.instrument_model(model_instance, instrument)


@dataclass
class StreamedResponseSync:
    """Synchronous wrapper around async streaming responses: the async producer runs in a background thread while events are consumed through a synchronous iterator.

    This class must be used as a context manager with the `with` statement.
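
    A minimal usage sketch (the model name is illustrative):

    ```py
    from pydantic_ai import ModelRequest
    from pydantic_ai.direct import model_request_stream_sync

    messages = [ModelRequest.user_text_prompt('Who was Albert Einstein?')]
    with model_request_stream_sync('openai:gpt-4.1-mini', messages) as stream:
        for event in stream:
            print(event)
    ```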
    """

    _async_stream_cm: AbstractAsyncContextManager[StreamedResponse]
    _queue: queue.Queue[messages.ModelResponseStreamEvent | Exception | None] = field(
        default_factory=queue.Queue, init=False
    )
    _thread: threading.Thread | None = field(default=None, init=False)
    _stream_response: StreamedResponse | None = field(default=None, init=False)
    _exception: Exception | None = field(default=None, init=False)
    _context_entered: bool = field(default=False, init=False)
    _stream_ready: threading.Event = field(default_factory=threading.Event, init=False)

    def __enter__(self) -> StreamedResponseSync:
        self._context_entered = True
        self._start_producer()
        return self

    def __exit__(
        self,
        _exc_type: type[BaseException] | None,
        _exc_val: BaseException | None,
        _exc_tb: TracebackType | None,
    ) -> None:
        self._cleanup()

    def __iter__(self) -> Iterator[messages.ModelResponseStreamEvent]:
        """Stream the response as an iterable of [`ModelResponseStreamEvent`][pydantic_ai.messages.ModelResponseStreamEvent]s."""
        self._check_context_manager_usage()

        while True:
            item = self._queue.get()
            if item is None:  # `None` is the producer's end-of-stream sentinel.
                break
            elif isinstance(item, Exception):
                raise item
            else:
                yield item

    def __repr__(self) -> str:
        if self._stream_response:
            return repr(self._stream_response)
        else:
            return f'{self.__class__.__name__}(context_entered={self._context_entered})'

    __str__ = __repr__

    def _check_context_manager_usage(self) -> None:
        if not self._context_entered:
            raise RuntimeError(
                'StreamedResponseSync must be used as a context manager. '
                'Use: `with model_request_stream_sync(...) as stream:`'
            )

    def _ensure_stream_ready(self) -> StreamedResponse:
        self._check_context_manager_usage()

        if self._stream_response is None:
            # Wait for the background thread to enter the async context and publish the stream.
            if not self._stream_ready.wait(timeout=STREAM_INITIALIZATION_TIMEOUT):
                raise RuntimeError('Stream failed to initialize within timeout')

            if self._stream_response is None:
                raise RuntimeError('Stream failed to initialize')

        return self._stream_response

    def _start_producer(self):
        self._thread = threading.Thread(target=self._async_producer, daemon=True)
        self._thread.start()

    def _async_producer(self):
        async def _consume_async_stream():
            try:
                async with self._async_stream_cm as stream:
                    self._stream_response = stream
                    self._stream_ready.set()
                    async for event in stream:
                        self._queue.put(event)
            except Exception as e:
                # Signal readiness even on failure so waiters in `_ensure_stream_ready` don't hang.
                self._stream_ready.set()
                self._queue.put(e)
            finally:
                self._queue.put(None)  # Signal end of stream to the consuming iterator.

        _get_event_loop().run_until_complete(_consume_async_stream())

    def _cleanup(self):
        if self._thread and self._thread.is_alive():
            self._thread.join()

    def get(self) -> messages.ModelResponse:
        """Build a ModelResponse from the data received from the stream so far."""
        return self._ensure_stream_ready().get()

    @property
    def response(self) -> messages.ModelResponse:
        """Get the current state of the response."""
        return self.get()

    def usage(self) -> RequestUsage:
        """Get the usage of the response so far."""
        return self._ensure_stream_ready().usage()

    @property
    def model_name(self) -> str:
        """Get the model name of the response."""
        return self._ensure_stream_ready().model_name

    @property
    def timestamp(self) -> datetime:
        """Get the timestamp of the response."""
        return self._ensure_stream_ready().timestamp