
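# This script powers the `trl vllm-serve` command. A typical invocation (the model name is illustrative) is:
#
#     trl vllm-serve --model Qwen/Qwen2.5-7B --tensor_parallel_size 2
#
# which starts a FastAPI server (default: http://0.0.0.0:8000) backed by vLLM worker processes.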
import argparse
import logging
import os
from collections.abc import Sequence
from contextlib import asynccontextmanager
from dataclasses import dataclass, field
from itertools import chain
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from typing import Optional

import torch

from trl import TrlParser
from trl.import_utils import (
    is_fastapi_available,
    is_pydantic_available,
    is_uvicorn_available,
    is_vllm_ascend_available,
    is_vllm_available,
)


if is_fastapi_available():
    from fastapi import FastAPI


if is_pydantic_available():
    from pydantic import BaseModel


if is_uvicorn_available():
    import uvicorn


if is_vllm_available():
    from vllm import LLM, SamplingParams
    from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator
    from vllm.distributed.parallel_state import get_world_group
    from vllm.distributed.utils import StatelessProcessGroup
    from vllm.sampling_params import GuidedDecodingParams
    from vllm.utils import get_open_port

    if is_vllm_ascend_available():
        from vllm_ascend.distributed.device_communicators.pyhccl import PyHcclCommunicator as PyNcclCommunicator


logger = logging.getLogger(__name__)

# vLLM runs its workers in subprocesses that use CUDA, so the multiprocessing start method must be "spawn";
# forking would fail with "RuntimeError: Cannot re-initialize CUDA in forked subprocess".
os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"


class WeightSyncWorkerExtension:
    """
    A vLLM worker extension that enables weight synchronization between a client and multiple server workers.

    This worker uses a `StatelessProcessGroup` to establish communication and a `PyNcclCommunicator` to handle
    efficient GPU-based communication using NCCL. The primary purpose of this class is to receive updated model weights
    from a client process and distribute them to all worker processes participating in model inference.
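
    Example (client-side sketch): in TRL the actual client lives in `trl.extras.vllm_client.VLLMClient`; the
    snippet below only illustrates the protocol, with made-up values:

    ```python
    import torch
    from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator
    from vllm.distributed.utils import StatelessProcessGroup

    host, port, world_size = "192.168.0.1", 51216, 3  # illustrative; world_size = tp_size * dp_size + 1
    # The client joins the same stateless group with the highest rank...
    pg = StatelessProcessGroup.create(host=host, port=port, rank=world_size - 1, world_size=world_size)
    comm = PyNcclCommunicator(pg, device=torch.device("cuda:0"))
    # ...and broadcasting a tensor from that rank delivers it to `update_named_param` on every worker.
    comm.broadcast(torch.zeros(16, device="cuda:0"), src=world_size - 1)
    ```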
    Nhostport
world_sizereturnc                     | j                   t        d      t               j                  }t	        j
                  ||||      }t        || j                        | _         |dz
  | _        y)aG  
        Initializes the weight update communicator using a stateless process group.

        This method creates a `StatelessProcessGroup` that allows external training processes to communicate with vLLM
        workers without interfering with the global torch distributed group.

        Args:
            host (`str`):
                Hostname or IP address of the master node.
            port (`int`):
                Port number to be used for communication.
            world_size (`int`):
                Total number of participating processes in the update group.
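
        In this script, the `/init_communicator/` endpoint triggers this method on every worker through the
        worker pipe, which ends up calling:

        ```python
        llm.collective_rpc(method="init_communicator", args=(host, port, world_size))
        ```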
        """
        if self.pynccl_comm is not None:
            raise RuntimeError("Weight update group already initialized. Call close_communicator first.")

        # Rank of this worker in the global world group.
        rank = get_world_group().rank

        # Create a stateless process group so that the training process can communicate with the vLLM workers
        # without interfering with the global torch distributed group.
        pg = StatelessProcessGroup.create(host=host, port=port, rank=rank, world_size=world_size)

        # Initialize the NCCL-based communicator for receiving weights.
        self.pynccl_comm = PyNcclCommunicator(pg, device=self.device)

        # The client process that broadcasts updated weights has the highest rank (world_size - 1).
        self.client_rank = world_size - 1

    def update_named_param(self, name: str, dtype: str, shape: Sequence[int]) -> None:
        """
        Receives updated weights from the client process and updates the named parameter in the model.

        Args:
            name (`str`):
                Name of the weight tensor being updated.
            dtype (`str`):
                Data type of the weight tensor as a string (e.g., `"torch.float32"`).
            shape (`Sequence[int]`):
                Shape of the weight tensor.
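
        The `dtype` string is resolved against `torch` itself, i.e. `"torch.float32"` is mapped to
        `torch.float32` via `getattr(torch, dtype.split(".")[-1])`.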
        """
        if self.pynccl_comm is None:
            raise RuntimeError("Communicator not initialized. Call `init_communicator` first.")

        # Resolve the dtype string (e.g. "torch.float32") to the corresponding torch dtype.
        dtype = getattr(torch, dtype.split(".")[-1])
        # Allocate a buffer on this worker's device to receive the incoming weights.
        weight = torch.empty(shape, dtype=dtype, device=self.device)

        # Receive the updated weights broadcast by the client process.
        self.pynccl_comm.broadcast(weight, src=self.client_rank)
        self.pynccl_comm.group.barrier()

        # Load the received weights into the model.
        self.model_runner.model.load_weights(weights=[(name, weight)])

    def close_communicator(self) -> None:
        """
        Closes the communicator when weight synchronization is no longer needed.

        This method deletes the NCCL communicator to release associated resources.
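
        In this script, the `/close_communicator/` endpoint invokes this method on every worker via
        `collective_rpc` once weight synchronization is no longer needed.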
        N)r(   r+   )r,   s    r.   close_communicatorz,WeightSyncWorkerExtension.close_communicator   s+     ' #D#D (r0   )r#   N)__name__
__module____qualname____doc__r(   r+   strintr/   r   rD   rF    r0   r.   r   r   F   sd     KK*c * *# *$ *<Gs G3 Gx} GQU G4
$r0   r   c                      e Zd ZU dZ eddi      Zeed<    edddi      Ze	e   ed	<    ed
ddi      Z
eed<    ed
ddi      Zeed<    edddi      Zeed<    edddi      Zeed<    edddi      Zeed<    edddi      Zeed<    edddi      Ze	e   ed<    edddi      Ze	e   ed<    eddd i      Ze	e   ed!<    eddd"i      Zeed#<    eddd$i      Zeed%<    ed&dd'i      Zeed(<   y))ScriptArgumentsa  
    Arguments for the script.

    Args:
        model (`str`):
            Model name or path to load the model from.
        revision (`str` or `None`, *optional*, defaults to `None`):
            Revision to use for the model. If not specified, the default branch will be used.
        tensor_parallel_size (`int`, *optional*, defaults to `1`):
            Number of tensor parallel workers to use.
        data_parallel_size (`int`, *optional*, defaults to `1`):
            Number of data parallel workers to use.
        host (`str`, *optional*, defaults to `"0.0.0.0"`):
            Host address to run the server on.
        port (`int`, *optional*, defaults to `8000`):
            Port to run the server on.
        gpu_memory_utilization (`float`, *optional*, defaults to `0.9`):
            Ratio (between 0 and 1) of GPU memory to reserve for the model weights, activations, and KV cache on the
            device dedicated to generation powered by vLLM. Higher values will increase the KV cache size and thus
            improve the model's throughput. However, if the value is too high, it may cause out-of-memory (OOM) errors
            during initialization.
        dtype (`str`, *optional*, defaults to `"auto"`):
            Data type to use for vLLM generation. If set to `"auto"`, the data type will be automatically determined
            based on the model configuration. Find the supported values in the vLLM documentation.
        max_model_len (`int` or `None`, *optional*, defaults to `None`):
            If set, the `max_model_len` to use for vLLM. This can be useful when running with reduced
            `vllm_gpu_memory_utilization`, leading to a reduced KV cache size. If not set, vLLM will use the model
            context size, which might be much larger than the KV cache, leading to inefficiencies.
        enable_prefix_caching (`bool` or `None`, *optional*, defaults to `None`):
            Whether to enable prefix caching in vLLM. If set to `True`, ensure that the model and the hardware support
            this feature.
        enforce_eager (`bool`, *optional*, defaults to `False`):
            Whether to enforce eager execution. If set to `True`, we will disable CUDA graph and always execute the
            model in eager mode. If `False` (default behavior), we will use CUDA graph and eager execution in hybrid.
        kv_cache_dtype (`str`, *optional*, defaults to `"auto"`):
            Data type to use for KV cache. If set to `"auto"`, the dtype will default to the model data type.
        trust_remote_code (`bool`, *optional*, defaults to `False`):
            Whether to trust remote code when loading models. Set to `True` to allow executing code from model
            repositories. This is required for some custom models but introduces security risks.
        log_level (`str`, *optional*, defaults to `"info"`):
            Log level for uvicorn. Possible choices: `"critical"`, `"error"`, `"warning"`, `"info"`, `"debug"`,
            `"trace"`.
    helpz*Model name or path to load the model from.)metadatarA   NzQRevision to use for the model. If not specified, the default branch will be used.)defaultrQ   revisionr'   z)Number of tensor parallel workers to use.tensor_parallel_sizez'Number of data parallel workers to use.data_parallel_sizez0.0.0.0z"Host address to run the server on.r    i@  zPort to run the server on.r!   g?aQ  Ratio (between 0 and 1) of GPU memory to reserve for the model weights, activations, and KV cache on the device dedicated to generation powered by vLLM. Higher values will increase the KV cache size and thus improve the model's throughput. However, if the value is too high, it may cause out-of-memory (OOM) errors during initialization.gpu_memory_utilizationautozData type to use for vLLM generation. If set to 'auto', the data type will be automatically determined based on the model configuration. Find the supported values in the vLLM documentation.r2   a  If set, the `max_model_len` to use for vLLM. This can be useful when running with reduced `vllm_gpu_memory_utilization`, leading to a reduced KV cache size. If not set, vLLM will use the model context size, which might be much larger than the KV cache, leading to inefficiencies.max_model_lenzxWhether to enable prefix caching in vLLM. If set to `True`, ensure that the model and the hardware support this feature.enable_prefix_cachingFzWhether to enforce eager execution. If set to `True`, we will disable CUDA graph and always execute the model in eager mode. If `False` (default behavior), we will use CUDA graph and eager execution in hybrid.enforce_eagerz_Data type to use for KV cache. If set to 'auto', the dtype will default to the model data type.kv_cache_dtypezWhether to trust remote code when loading models. Set to True to allow executing code from model repositories. This is required for some custom models but introduces security risks.trust_remote_codeinfozbLog level for uvicorn. Possible choices: 'critical', 'error', 'warning', 'info', 'debug', 'trace'.	log_level)rG   rH   rI   rJ   r   rA   rK   __annotations__rS   r   rT   rL   rU   r    r!   rV   floatr2   rX   rY   boolrZ   r[   r\   r^   rM   r0   r.   rO   rO      s   *X FGE3  $mnHhsm  !&EF!#  $CD  >?D#  67D#  %* @
%E   p
E3  $) e
$M8C=  -2 -
-8D>  %* #
%M8D>   u
NC  $ c
t   
Is r0   rO   script_argsdata_parallel_rankmaster_port
connectionr#   c                 F   t        |      t        j                  d<   t        |      t        j                  d<   t        | j                        t        j                  d<   t        |      t        j                  d<   t	        | j
                  | j                  | j                  | j                  | j                  | j                  | j                  | j                  | j                  d| j                        }|j                  ddi       	 	 |j!                         }|d   dv rW|d   }|j'                  dd      |j'                  di       }}t)        ||      }	 |	|i |}
|d   dk(  r|j                  |
       n	|d   dk(  ry y# t"        $ r |j%                  d	
       Y y w xY w)NVLLM_DP_RANKVLLM_DP_RANK_LOCALVLLM_DP_SIZEVLLM_DP_MASTER_PORTz0trl.scripts.vllm_serve.WeightSyncWorkerExtension)rA   rS   rT   rV   rZ   r2   rY   r[   rX   worker_extension_clsr\   statusreadyrF   )methodtype)callfire_and_forgetrn   argsrM   kwargsrp   shutdown)rK   osenvironrU   r   rA   rS   rT   rV   rZ   r2   rY   r[   rX   r\   sendrecvKeyboardInterruptcollective_rpcgetr9   )rb   rc   rd   re   llmcommandmethod_namerr   rs   rn   results              r.   
llm_workerr     s    "%%7!8BJJ~'*+='>BJJ#$!$[%C%C!DBJJ~(+K(8BJJ$%
%%(==*AA!// *??"11!//O%77C$ OOXw'(
	 oo'G 6?99!(+K";;vr2GKK"4M&DS+.FT,V,Fv&('V_
*#  ! 	&:;	s   
F F F lstnc           
          t        t        |       |      \  }}t        |      D cg c]-  }| ||z  t        ||      z   |dz   |z  t        |dz   |      z    / c}S c c}w )aI  
    Split list `lst` into `n` evenly distributed sublists.

    Example:
    ```python
    >>> chunk_list([1, 2, 3, 4, 5, 6], 2)
    [[1, 2, 3], [4, 5, 6]]

    >>> chunk_list([1, 2, 3, 4, 5, 6], 4)
    [[1, 2], [3, 4], [5], [6]]

    >>> chunk_list([1, 2, 3, 4, 5, 6], 8)
    [[1], [2], [3], [4], [5], [6], [], []]
    ```
    """
    k, r = divmod(len(lst), n)
    return [lst[i * k + min(i, r) : (i + 1) * k + min(i + 1, r)] for i in range(n)]


def main(script_args: ScriptArguments):
    if not is_fastapi_available():
        raise ImportError(
            "FastAPI is required to run the vLLM serve script. Please install it using `pip install fastapi`."
        )

    if not is_pydantic_available():
        raise ImportError(
            "Pydantic is required to run the vLLM serve script. Please install it using `pip install pydantic`."
        )

    if not is_uvicorn_available():
        raise ImportError(
            "Uvicorn is required to run the vLLM serve script. Please install it using `pip install uvicorn`."
        )

    if not is_vllm_available():
        raise ImportError(
            "vLLM is required to run the vLLM serve script. Please install it using `pip install vllm`."
        )

    # Spawn one LLM worker process per data-parallel rank, each connected to the server through a pipe.
    master_port = get_open_port()
    connections = []
    processes = []
    for data_parallel_rank in range(script_args.data_parallel_size):
        parent_connection, child_connection = Pipe()
        process = Process(target=llm_worker, args=(script_args, data_parallel_rank, master_port, child_connection))
        process.start()
        connections.append(parent_connection)
        processes.append(process)

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        # Wait for all workers to send "ready" before accepting requests.
        ready_connections = set()
        while len(ready_connections) < script_args.data_parallel_size:
            for connection in connections:
                msg = connection.recv()
                if isinstance(msg, dict) and msg.get("status") == "ready":
                    ready_connections.add(connection)

        yield

        # On shutdown, wait for the worker processes to exit; force-terminate them if they hang.
        for process in processes:
            process.join(timeout=10)
            if process.is_alive():
                logger.warning(f"Process {process} is still alive after 10 seconds, attempting to terminate...")
                process.terminate()
                process.join()  # ensure termination after calling terminate()

    app = FastAPI(lifespan=lifespan)

    @app.get("/health/")
    async def health():
        """
        Health check endpoint to verify that the server is running.
        """
        return {"status": "ok"}

    @app.get("/get_world_size/")
    async def get_world_size():
        """
        Retrieves the world size of the LLM engine, which is `tensor_parallel_size * data_parallel_size`.

        Returns:
            `dict`:
                A dictionary containing the world size.

        Example response:
        ```json
        {"world_size": 8}
        ```
        """
        return {"world_size": script_args.tensor_parallel_size * script_args.data_parallel_size}

    class GenerateRequest(BaseModel):
        prompts: list[str]
        n: int = 1
        repetition_penalty: float = 1.0
        temperature: float = 1.0
        top_p: float = 1.0
        top_k: int = -1
        min_p: float = 0.0
        max_tokens: int = 16
        guided_decoding_regex: Optional[str] = None
        generation_kwargs: dict = field(default_factory=dict)

    class GenerateResponse(BaseModel):
        completion_ids: list[list[int]]

    @app.post("/generate/", response_model=GenerateResponse)
    async def generate(request: GenerateRequest):
        """
        Generates completions for the provided prompts.

        Args:
            request (`GenerateRequest`):
                - `prompts` (list of `str`): A list of prompts (text strings) for the model to generate completions.
                - `n` (`int`, *optional*, defaults to `1`): Number of completions to generate for each prompt.
                - `repetition_penalty` (`float`, *optional*, defaults to `1.0`): Repetition penalty to apply during generation.
                - `temperature` (`float`, *optional*, defaults to `1.0`): Temperature for sampling. Higher values lead to more random outputs.
                - `top_p` (`float`, *optional*, defaults to `1.0`): Top-p (nucleus) sampling parameter. It controls the diversity of the generated text.
                - `top_k` (`int`, *optional*, defaults to `-1`): Top-k sampling parameter. If set to `-1`, it disables top-k sampling.
                - `min_p` (`float`, *optional*, defaults to `0.0`): Minimum probability threshold for sampling.
                - `max_tokens` (`int`, *optional*, defaults to `16`): Maximum number of tokens to generate for each completion.
                - `guided_decoding_regex` (`str`, *optional*): A regex pattern for guided decoding. If provided, the model will only generate tokens that match this regex pattern.
                - `generation_kwargs` (`dict`, *optional*): Additional generation parameters to pass to the vLLM `SamplingParams`. This can include parameters like `seed`, `frequency_penalty`, etc. If it contains keys that conflict with the other parameters, they will override them.

        Returns:
            `GenerateResponse`:
                - `completion_ids` (list of list of `int`): A list of lists of token IDs for each generated completion.

        Example request:
        ```json
        {"prompts": ["Hello world", "What is AI?"]}
        ```

        Example response:
        ```json
        {"completion_ids": [[101, 102, 103], [201, 202, 203]]}
        ```
        """
        # Set up guided decoding, if requested.
        if request.guided_decoding_regex is not None:
            guided_decoding = GuidedDecodingParams(backend="outlines", regex=request.guided_decoding_regex)
        else:
            guided_decoding = None

        generation_kwargs = {
            "n": request.n,
            "repetition_penalty": request.repetition_penalty,
            "temperature": request.temperature,
            "top_p": request.top_p,
            "top_k": request.top_k,
            "min_p": request.min_p,
            "max_tokens": request.max_tokens,
            "guided_decoding": guided_decoding,
        }
        generation_kwargs.update(request.generation_kwargs)
        sampling_params = SamplingParams(**generation_kwargs)

        # Evenly distribute the prompts across the data-parallel ranks.
        chunked_prompts = chunk_list(request.prompts, script_args.data_parallel_size)

        for connection, prompts in zip(connections, chunked_prompts):
            # When there are fewer prompts than data-parallel ranks, some workers receive an empty list. vLLM
            # requires at least one prompt, so we send a placeholder and discard its output below.
            if not prompts:
                prompts = ["<placeholder>"]
            kwargs = {"prompts": prompts, "sampling_params": sampling_params}
            connection.send({"type": "call", "method": "generate", "kwargs": kwargs})

        # Collect the results and drop the placeholder outputs.
        all_outputs = [connection.recv() for connection in connections]
        all_outputs = [output for output, prompts in zip(all_outputs, chunked_prompts) if prompts]
        all_outputs = list(chain.from_iterable(all_outputs))  # flatten the per-rank lists
        completion_ids = [list(output.token_ids) for outputs in all_outputs for output in outputs.outputs]
        return {"completion_ids": completion_ids}

    class InitCommunicatorRequest(BaseModel):
        host: str
        port: int
        world_size: int

    @app.post("/init_communicator/")
    async def init_communicator(request: InitCommunicatorRequest):
        """
        Initializes the communicator for synchronizing model weights between a client and multiple server workers.

        Args:
            request (`InitCommunicatorRequest`):
                - `host` (`str`): Hostname or IP address of the master node.
                - `port` (`int`): Port number to be used for communication.
                - `world_size` (`int`): Total number of participating processes in the group.
        """
        # The world size includes every vLLM worker (tensor_parallel_size * data_parallel_size) plus the client.
        world_size = script_args.tensor_parallel_size * script_args.data_parallel_size + 1

        # Fire init_communicator(host, port, world_size) on every worker through collective_rpc.
        kwargs = {"method": "init_communicator", "args": (request.host, request.port, world_size)}
        for connection in connections:
            connection.send({"type": "fire_and_forget", "method": "collective_rpc", "kwargs": kwargs})

        return {"message": "Request received, initializing communicator"}

    class UpdateWeightsRequest(BaseModel):
        name: str
        dtype: str
        shape: list[int]

    @app.post("/update_named_param/")
    async def update_named_param(request: UpdateWeightsRequest):
        """
        Updates the model weights with the provided tensor.

        Once this endpoint is called, the client process should broadcast the updated weights to all server workers.

        Args:
            request (`UpdateWeightsRequest`):
                - `name` (`str`): Name of the weight tensor being updated.
                - `dtype` (`str`): Data type of the weight tensor (e.g., `"torch.float32"`).
                - `shape` (list of `int`): Shape of the weight tensor.

        """
        kwargs = {"method": "update_named_param", "args": (request.name, request.dtype, tuple(request.shape))}
        for connection in connections:
            connection.send({"type": "fire_and_forget", "method": "collective_rpc", "kwargs": kwargs})

        return {"message": "Request received, updating named parameter"}

    @app.post("/reset_prefix_cache/")
    async def reset_prefix_cache():
        """
        Resets the prefix cache for the model.
        """
        for connection in connections:
            connection.send({"type": "call", "method": "reset_prefix_cache"})
        # Wait for and collect the result from every worker.
        all_outputs = [connection.recv() for connection in connections]
        success = all(output for output in all_outputs)
        return {"message": "Request received, resetting prefix cache status: " + str(success)}

    @app.post("/close_communicator/")
    async def close_communicator():
        """
        Closes the weight update group and cleans up associated resources.
        """
        kwargs = {"method": "close_communicator"}
        for connection in connections:
            connection.send({"type": "fire_and_forget", "method": "collective_rpc", "kwargs": kwargs})
        return {"message": "Request received, closing communicator"}

    # Start the server.
    uvicorn.run(app, host=script_args.host, port=script_args.port, log_level=script_args.log_level)
subparsersc                 \    | | j                  ddt              }|S t        t              }|S )Nz

def make_parser(subparsers: argparse._SubParsersAction = None):
    if subparsers is not None:
        parser = subparsers.add_parser("vllm-serve", help="Run the vLLM serve script", dataclass_types=ScriptArguments)
    else:
        parser = TrlParser(ScriptArguments)
    return parser


if __name__ == "__main__":
    parser = make_parser()
    (script_args,) = parser.parse_args_and_config()
    main(script_args)