Skip to content

vllm.entrypoints.pooling.pooling.protocol

PoolingRequest module-attribute

T module-attribute

T = TypeVar('T')

IOProcessorRequest

Bases: OpenAIBaseModel, Generic[T]

Source code in vllm/entrypoints/pooling/pooling/protocol.py
class IOProcessorRequest(OpenAIBaseModel, Generic[T]):
    """Generic request for IOProcessor-plugin pooling.

    The payload (``data``) is produced and consumed by the plugin itself,
    hence the generic type parameter ``T``.
    """

    # Name of the served model; None lets the server use its default.
    model: str | None = None

    priority: int = Field(default=0)
    """
    The priority of the request (lower means earlier handling;
    default: 0). Any priority other than 0 will raise an error
    if the served model does not use priority scheduling.
    """
    # Plugin-defined request payload (opaque to the server).
    data: T

    # Pooling task to run; "plugin" hands processing to the IOProcessor plugin.
    task: PoolingTask = "plugin"
    encoding_format: EncodingFormat = "float"
    embed_dtype: EmbedDType = Field(
        default="float32",
        description=(
            "What dtype to use for encoding. Default to using float32 for base64 "
            "encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )
    # NOTE: a trailing space was added after "behavior." so the rendered
    # description reads "...behavior. This parameter..." — it previously
    # concatenated as "behavior.This", unlike the embed_dtype field above.
    endianness: Endianness = Field(
        default="native",
        description=(
            "What endianness to use for encoding. Default to using native for "
            "base64 encoding to match the OpenAI python client behavior. "
            "This parameter will affect base64 and binary_response."
        ),
    )

    def to_pooling_params(self):
        """Return default PoolingParams; plugins define their own semantics."""
        return PoolingParams()

data instance-attribute

data: T

embed_dtype class-attribute instance-attribute

embed_dtype: EmbedDType = Field(
    default="float32",
    description="What dtype to use for encoding. Default to using float32 for base64 encoding to match the OpenAI python client behavior. This parameter will affect base64 and binary_response.",
)

encoding_format class-attribute instance-attribute

encoding_format: EncodingFormat = 'float'

endianness class-attribute instance-attribute

endianness: Endianness = Field(
    default="native",
    description="What endianness to use for encoding. Default to using native for base64 encoding to match the OpenAI python client behavior. This parameter will affect base64 and binary_response.",
)

model class-attribute instance-attribute

model: str | None = None

priority class-attribute instance-attribute

priority: int = Field(default=0)

The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.

task class-attribute instance-attribute

task: PoolingTask = 'plugin'

to_pooling_params

to_pooling_params()
Source code in vllm/entrypoints/pooling/pooling/protocol.py
def to_pooling_params(self):
    return PoolingParams()

IOProcessorResponse

Bases: OpenAIBaseModel, Generic[T]

Source code in vllm/entrypoints/pooling/pooling/protocol.py
class IOProcessorResponse(OpenAIBaseModel, Generic[T]):
    request_id: str | None = None
    """
    The request_id associated with this response
    """
    created_at: int = Field(default_factory=lambda: int(time.time()))

    data: T
    """
    When using plugins IOProcessor plugins, the actual output is generated
    by the plugin itself. Hence, we use a generic type for the response data
    """

created_at class-attribute instance-attribute

created_at: int = Field(default_factory=lambda: int(time()))

data instance-attribute

data: T

When using IOProcessor plugins, the actual output is generated by the plugin itself. Hence, we use a generic type for the response data

request_id class-attribute instance-attribute

request_id: str | None = None

The request_id associated with this response

PoolingBytesResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/pooling/protocol.py
class PoolingBytesResponse(OpenAIBaseModel):
    """Binary pooling response: raw byte payloads plus a metadata string."""

    # Raw encoded pooling outputs, one bytes object per result.
    body: list[bytes]
    # Metadata string accompanying the body; its format is defined by the
    # producer — not visible from this file.
    metadata: str
    # HTTP content type to use when serving the binary body.
    media_type: str = "application/octet-stream"

body instance-attribute

body: list[bytes]

media_type class-attribute instance-attribute

media_type: str = 'application/octet-stream'

metadata instance-attribute

metadata: str

PoolingChatRequest

Bases: EmbeddingChatRequest

Source code in vllm/entrypoints/pooling/pooling/protocol.py
class PoolingChatRequest(EmbeddingChatRequest):
    """Chat-style pooling request; extends EmbeddingChatRequest with a task
    selector and activation controls."""

    # Optional explicit pooling task; None means the server decides.
    task: PoolingTask | None = None
    # Deprecated alias for use_activation (kept for backward compatibility).
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )
    # Deprecated alias for use_activation (kept for backward compatibility).
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )
    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "If it is a classify or token_classify task, the default is True; "
        "for other tasks, this value should be None.",
    )

    def to_pooling_params(self):
        """Build PoolingParams from this request.

        get_use_activation reconciles the deprecated softmax/activation
        fields with use_activation.
        """
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            dimensions=self.dimensions,
            normalize=self.normalize,
            use_activation=get_use_activation(self),
        )

activation class-attribute instance-attribute

activation: bool | None = Field(
    default=None,
    description="activation will be deprecated, please use use_activation instead.",
)

softmax class-attribute instance-attribute

softmax: bool | None = Field(
    default=None,
    description="softmax will be deprecated, please use use_activation instead.",
)

task class-attribute instance-attribute

task: PoolingTask | None = None

use_activation class-attribute instance-attribute

use_activation: bool | None = Field(
    default=None,
    description="Whether to use activation for classification outputs. If it is a classify or token_classify task, the default is True; for other tasks, this value should be None.",
)

to_pooling_params

to_pooling_params()
Source code in vllm/entrypoints/pooling/pooling/protocol.py
def to_pooling_params(self):
    return PoolingParams(
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        dimensions=self.dimensions,
        normalize=self.normalize,
        use_activation=get_use_activation(self),
    )

PoolingCompletionRequest

Bases: EmbeddingCompletionRequest

Source code in vllm/entrypoints/pooling/pooling/protocol.py
class PoolingCompletionRequest(EmbeddingCompletionRequest):
    """Completion-style pooling request; mirrors PoolingChatRequest but
    extends EmbeddingCompletionRequest."""

    # Optional explicit pooling task; None means the server decides.
    task: PoolingTask | None = None
    # Deprecated alias for use_activation (kept for backward compatibility).
    softmax: bool | None = Field(
        default=None,
        description="softmax will be deprecated, please use use_activation instead.",
    )
    # Deprecated alias for use_activation (kept for backward compatibility).
    activation: bool | None = Field(
        default=None,
        description="activation will be deprecated, please use use_activation instead.",
    )
    use_activation: bool | None = Field(
        default=None,
        description="Whether to use activation for classification outputs. "
        "If it is a classify or token_classify task, the default is True; "
        "for other tasks, this value should be None.",
    )

    def to_pooling_params(self):
        """Build PoolingParams from this request.

        get_use_activation reconciles the deprecated softmax/activation
        fields with use_activation.
        """
        return PoolingParams(
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            dimensions=self.dimensions,
            normalize=self.normalize,
            use_activation=get_use_activation(self),
        )

activation class-attribute instance-attribute

activation: bool | None = Field(
    default=None,
    description="activation will be deprecated, please use use_activation instead.",
)

softmax class-attribute instance-attribute

softmax: bool | None = Field(
    default=None,
    description="softmax will be deprecated, please use use_activation instead.",
)

task class-attribute instance-attribute

task: PoolingTask | None = None

use_activation class-attribute instance-attribute

use_activation: bool | None = Field(
    default=None,
    description="Whether to use activation for classification outputs. If it is a classify or token_classify task, the default is True; for other tasks, this value should be None.",
)

to_pooling_params

to_pooling_params()
Source code in vllm/entrypoints/pooling/pooling/protocol.py
def to_pooling_params(self):
    return PoolingParams(
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        dimensions=self.dimensions,
        normalize=self.normalize,
        use_activation=get_use_activation(self),
    )

PoolingResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/pooling/protocol.py
class PoolingResponse(OpenAIBaseModel):
    """OpenAI-style list response for the pooling endpoint."""

    # Unique response id, prefixed with "pool-".
    id: str = Field(default_factory=lambda: f"pool-{random_uuid()}")
    # Object-type discriminator, always "list".
    object: str = "list"
    # Unix timestamp (seconds) of response creation.
    created: int = Field(default_factory=lambda: int(time.time()))
    # Name of the model that served the request.
    model: str
    # Pooling results.
    data: list[PoolingResponseData]
    # Token-usage accounting for the request.
    usage: UsageInfo

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time()))

data instance-attribute

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"pool-{random_uuid()}"
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: str = 'list'

usage instance-attribute

usage: UsageInfo

PoolingResponseData

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/pooling/pooling/protocol.py
class PoolingResponseData(OpenAIBaseModel):
    """A single pooling result within a PoolingResponse."""

    # Position of this result relative to the request's inputs.
    index: int
    # Object-type discriminator, always "pooling".
    object: str = "pooling"
    # Pooled output: nested or flat float lists, or an encoded string
    # (presumably base64 when encoding_format requests it — not visible here).
    data: list[list[float]] | list[float] | str

data instance-attribute

data: list[list[float]] | list[float] | str

index instance-attribute

index: int

object class-attribute instance-attribute

object: str = 'pooling'