from typing import Optional

import torch
from torch import Tensor
from torch.nn import functional as F, init
from torch.nn.parameter import Parameter

from .module import Module

__all__ = ["Embedding", "EmbeddingBag"]


class Embedding(Module):
    r"""A simple lookup table that stores embeddings of a fixed dictionary and size.

This module is often used to store word embeddings and retrieve them using indices.
The input to the module is a list of indices, and the output is the corresponding
word embeddings.

Args:
    num_embeddings (int): size of the dictionary of embeddings
    embedding_dim (int): the size of each embedding vector
    padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
                                 therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
                                 i.e. it remains as a fixed "pad". For a newly constructed Embedding,
                                 the embedding vector at :attr:`padding_idx` will default to all zeros,
                                 but can be updated to another value to be used as the padding vector.
    max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
                                is renormalized to have norm :attr:`max_norm`.
    norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
    scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of
                                            the words in the mini-batch. Default ``False``.
    sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor.
                             See Notes for more details regarding sparse gradients.

Attributes:
    weight (Tensor): the learnable weights of the module of shape (num_embeddings, embedding_dim)
                     initialized from :math:`\mathcal{N}(0, 1)`

Shape:
    - Input: :math:`(*)`, IntTensor or LongTensor of arbitrary shape containing the indices to extract
    - Output: :math:`(*, H)`, where `*` is the input shape and :math:`H=\text{embedding\_dim}`

.. note::
    Keep in mind that only a limited number of optimizers support
    sparse gradients: currently it's :class:`optim.SGD` (`CUDA` and `CPU`),
    :class:`optim.SparseAdam` (`CUDA` and `CPU`) and :class:`optim.Adagrad` (`CPU`)
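
    A minimal sketch of the sparse path, pairing ``sparse=True`` with one of the
    optimizers listed above::

        >>> embedding = nn.Embedding(10, 3, sparse=True)
        >>> optimizer = torch.optim.SparseAdam(embedding.parameters())
        >>> embedding(torch.LongTensor([1, 2])).sum().backward()
        >>> embedding.weight.grad.is_sparse
        True
        >>> optimizer.step()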

.. note::
    When :attr:`max_norm` is not ``None``, :class:`Embedding`'s forward method will modify the
    :attr:`weight` tensor in-place. Since tensors needed for gradient computations cannot be
    modified in-place, performing a differentiable operation on ``Embedding.weight`` before
    calling :class:`Embedding`'s forward method requires cloning ``Embedding.weight`` when
    :attr:`max_norm` is not ``None``. For example::

        n, d, m = 3, 5, 7
        embedding = nn.Embedding(n, d, max_norm=1.0)
        W = torch.randn((m, d), requires_grad=True)
        idx = torch.tensor([1, 2])
        a = embedding.weight.clone() @ W.t()  # weight must be cloned for this to be differentiable
        b = embedding(idx) @ W.t()  # modifies weight in-place
        out = (a.unsqueeze(0) + b.unsqueeze(1))
        loss = out.sigmoid().prod()
        loss.backward()

Examples::

    >>> # an Embedding module containing 10 tensors of size 3
    >>> embedding = nn.Embedding(10, 3)
    >>> # a batch of 2 samples of 4 indices each
    >>> input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
    >>> embedding(input)
    tensor([[[-0.0251, -1.6902,  0.7172],
             [-0.6431,  0.0748,  0.6969],
             [ 1.4970,  1.3448, -0.9685],
             [-0.3677, -2.7265, -0.1685]],

            [[ 1.4970,  1.3448, -0.9685],
             [ 0.4362, -0.4004,  0.9400],
             [-0.6431,  0.0748,  0.6969],
             [ 0.9124, -2.3616,  1.1151]]])


    >>> # example with padding_idx
    >>> embedding = nn.Embedding(10, 3, padding_idx=0)
    >>> input = torch.LongTensor([[0, 2, 0, 5]])
    >>> embedding(input)
    tensor([[[ 0.0000,  0.0000,  0.0000],
             [ 0.1535, -2.0309,  0.9315],
             [ 0.0000,  0.0000,  0.0000],
             [-0.1655,  0.9897,  0.0635]]])

    >>> # example of changing `pad` vector
    >>> padding_idx = 0
    >>> embedding = nn.Embedding(3, 3, padding_idx=padding_idx)
    >>> embedding.weight
    Parameter containing:
    tensor([[ 0.0000,  0.0000,  0.0000],
            [-0.7895, -0.7089, -0.0364],
            [ 0.6778,  0.5803,  0.2678]], requires_grad=True)
    >>> with torch.no_grad():
    ...     embedding.weight[padding_idx] = torch.ones(3)
    >>> embedding.weight
    Parameter containing:
    tensor([[ 1.0000,  1.0000,  1.0000],
            [-0.7895, -0.7089, -0.0364],
            [ 0.6778,  0.5803,  0.2678]], requires_grad=True)
    """

    __constants__ = [
        "num_embeddings",
        "embedding_dim",
        "padding_idx",
        "max_norm",
        "norm_type",
        "scale_grad_by_freq",
        "sparse",
    ]

    num_embeddings: int
    embedding_dim: int
    padding_idx: Optional[int]
    max_norm: Optional[float]
    norm_type: float
    scale_grad_by_freq: bool
    weight: Tensor
    freeze: bool
    sparse: bool

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
        _freeze: bool = False,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        if padding_idx is not None:
            if padding_idx > 0:
                assert (
                    padding_idx < self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
            elif padding_idx < 0:
                assert (
                    padding_idx >= -self.num_embeddings
                ), "Padding_idx must be within num_embeddings"
                # Normalize a negative padding_idx to its positive equivalent.
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if _weight is None:
            self.weight = Parameter(
                torch.empty((num_embeddings, embedding_dim), **factory_kwargs),
                requires_grad=not _freeze,
            )
            self.reset_parameters()
        else:
            assert list(_weight.shape) == [
                num_embeddings,
                embedding_dim,
            ], "Shape of weight does not match num_embeddings and embedding_dim"
            self.weight = Parameter(_weight, requires_grad=not _freeze)

        self.sparse = sparse

    def reset_parameters(self) -> None:
        init.normal_(self.weight)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # The padding vector starts (and, after a reset, stays) at all zeros.
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        return F.embedding(
            input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

    def extra_repr(self) -> str:
        s = "{num_embeddings}, {embedding_dim}"
        if self.padding_idx is not None:
            s += ", padding_idx={padding_idx}"
        if self.max_norm is not None:
            s += ", max_norm={max_norm}"
        if self.norm_type != 2:
            s += ", norm_type={norm_type}"
        if self.scale_grad_by_freq is not False:
            s += ", scale_grad_by_freq={scale_grad_by_freq}"
        if self.sparse is not False:
            s += ", sparse=True"
        return s.format(**self.__dict__)

    @classmethod
    def from_pretrained(
        cls,
        embeddings,
        freeze=True,
        padding_idx=None,
        max_norm=None,
        norm_type=2.0,
        scale_grad_by_freq=False,
        sparse=False,
    ):
        r"""Create Embedding instance from given 2-dimensional FloatTensor.

Args:
    embeddings (Tensor): FloatTensor containing weights for the Embedding.
        First dimension is being passed to Embedding as ``num_embeddings``, second as ``embedding_dim``.
    freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process.
        Equivalent to ``embedding.weight.requires_grad = False``. Default: ``True``
    padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
                                 therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
                                 i.e. it remains as a fixed "pad".
    max_norm (float, optional): See module initialization documentation.
    norm_type (float, optional): See module initialization documentation. Default ``2``.
    scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``.
    sparse (bool, optional): See module initialization documentation.

Examples::

    >>> # FloatTensor containing pretrained weights
    >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
    >>> embedding = nn.Embedding.from_pretrained(weight)
    >>> # Get embeddings for index 1
    >>> input = torch.LongTensor([1])
    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
    >>> embedding(input)
    tensor([[ 4.0000,  5.1000,  6.3000]])
        """
        assert (
            embeddings.dim() == 2
        ), "Embeddings parameter is expected to be 2-dimensional"
        rows, cols = embeddings.shape
        embedding = cls(
            num_embeddings=rows,
            embedding_dim=cols,
            _weight=embeddings,
            _freeze=freeze,
            padding_idx=padding_idx,
            max_norm=max_norm,
            norm_type=norm_type,
            scale_grad_by_freq=scale_grad_by_freq,
            sparse=sparse,
        )
        return embedding


class EmbeddingBag(Module):
    r"""Compute sums or means of 'bags' of embeddings, without instantiating the intermediate embeddings.

For bags of constant length, no :attr:`per_sample_weights`, no indices equal to :attr:`padding_idx`,
and with 2D inputs, this class

    * with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=1)``,
    * with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=1)``,
    * with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=1)``.

However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
operations.
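
A minimal sketch of the ``mode="sum"`` equivalence above, sharing one weight matrix
between the two modules::

    >>> bag = nn.EmbeddingBag(10, 3, mode='sum')
    >>> emb = nn.Embedding.from_pretrained(bag.weight, freeze=False)
    >>> input = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
    >>> torch.allclose(bag(input), emb(input).sum(dim=1))
    True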

EmbeddingBag also supports per-sample weights as an argument to the forward
pass. This scales the output of the Embedding before performing a weighted
reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
:attr:`per_sample_weights`.
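
For example, a minimal sketch where each looked-up row is scaled before the sum::

    >>> bag = nn.EmbeddingBag(10, 3, mode='sum')
    >>> input = torch.LongTensor([1, 2, 4, 5])
    >>> offsets = torch.LongTensor([0, 2])
    >>> per_sample_weights = torch.tensor([0.5, 1.0, 2.0, 0.1])
    >>> bag(input, offsets, per_sample_weights).shape
    torch.Size([2, 3])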

Args:
    num_embeddings (int): size of the dictionary of embeddings
    embedding_dim (int): the size of each embedding vector
    max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
                                is renormalized to have norm :attr:`max_norm`.
    norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
    scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of
                                            the words in the mini-batch. Default ``False``.
                                            Note: this option is not supported when ``mode="max"``.
    mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
                             ``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
                             into consideration. ``"mean"`` computes the average of the values
                             in the bag, ``"max"`` computes the max value over each bag.
                             Default: ``"mean"``
    sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
                             Notes for more details regarding sparse gradients. Note: this option is not
                             supported when ``mode="max"``.
    include_last_offset (bool, optional): if ``True``, :attr:`offsets` has one additional element, where the last element
                                  is equivalent to the size of `indices`. This matches the CSR format
                                  (see the sketch after this argument list).
    padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the
                                 gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated
                                 during training, i.e. it remains as a fixed "pad". For a newly constructed
                                 EmbeddingBag, the embedding vector at :attr:`padding_idx` will default to all
                                 zeros, but can be updated to another value to be used as the padding vector.
                                 Note that the embedding vector at :attr:`padding_idx` is excluded from the
                                 reduction.
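
A short sketch of the ``include_last_offset`` layout, where :attr:`offsets` carries
``B + 1`` entries for ``B`` bags and ends with ``len(input)``::

    >>> bag = nn.EmbeddingBag(10, 3, mode='sum', include_last_offset=True)
    >>> input = torch.LongTensor([1, 2, 4, 5, 4, 3])
    >>> offsets = torch.LongTensor([0, 2, 6])  # last entry == len(input)
    >>> bag(input, offsets).shape
    torch.Size([2, 3])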

Attributes:
    weight (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)`
                     initialized from :math:`\mathcal{N}(0, 1)`.

Examples::

    >>> # an EmbeddingBag module containing 10 tensors of size 3
    >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum')
    >>> # a batch of 2 samples of 4 indices each
    >>> input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
    >>> offsets = torch.tensor([0, 4], dtype=torch.long)
    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
    >>> embedding_sum(input, offsets)
    tensor([[-0.8861, -5.4350, -0.0523],
            [ 1.1306, -2.5798, -1.0044]])

    >>> # Example with padding_idx
    >>> embedding_sum = nn.EmbeddingBag(10, 3, mode='sum', padding_idx=2)
    >>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9], dtype=torch.long)
    >>> offsets = torch.tensor([0, 4], dtype=torch.long)
    >>> embedding_sum(input, offsets)
    tensor([[ 0.0000,  0.0000,  0.0000],
            [-0.7082,  3.2145, -2.6251]])

    >>> # An EmbeddingBag can be loaded from an Embedding like so
    >>> embedding = nn.Embedding(10, 3, padding_idx=2)
    >>> embedding_sum = nn.EmbeddingBag.from_pretrained(
            embedding.weight,
            padding_idx=embedding.padding_idx,
            mode='sum')
    """

    __constants__ = [
        "num_embeddings",
        "embedding_dim",
        "max_norm",
        "norm_type",
        "scale_grad_by_freq",
        "mode",
        "sparse",
        "include_last_offset",
        "padding_idx",
    ]

    num_embeddings: int
    embedding_dim: int
    max_norm: Optional[float]
    norm_type: float
    scale_grad_by_freq: bool
    weight: Tensor
    mode: str
    sparse: bool
    include_last_offset: bool
    padding_idx: Optional[int]

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        mode: str = "mean",
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
        include_last_offset: bool = False,
        padding_idx: Optional[int] = None,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.max_norm = max_norm
        self.norm_type = norm_type
        self.scale_grad_by_freq = scale_grad_by_freq
        if padding_idx is not None:
            if padding_idx > 0:
                assert (
                    padding_idx < self.num_embeddings
                ), "padding_idx must be within num_embeddings"
            elif padding_idx < 0:
                assert (
                    padding_idx >= -self.num_embeddings
                ), "padding_idx must be within num_embeddings"
                # Normalize a negative padding_idx to its positive equivalent.
                padding_idx = self.num_embeddings + padding_idx
        self.padding_idx = padding_idx
        if _weight is None:
            self.weight = Parameter(
                torch.empty((num_embeddings, embedding_dim), **factory_kwargs)
            )
            self.reset_parameters()
        else:
            assert list(_weight.shape) == [
                num_embeddings,
                embedding_dim,
            ], "Shape of weight does not match num_embeddings and embedding_dim"
            self.weight = Parameter(_weight)
        self.mode = mode
        self.sparse = sparse
        self.include_last_offset = include_last_offset

    def reset_parameters(self) -> None:
        init.normal_(self.weight)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # The padding vector starts (and, after a reset, stays) at all zeros.
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(
        self,
        input: Tensor,
        offsets: Optional[Tensor] = None,
        per_sample_weights: Optional[Tensor] = None,
    ) -> Tensor:
        r"""Forward pass of EmbeddingBag.

Args:
    input (Tensor): Tensor containing bags of indices into the embedding matrix.
    offsets (Tensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines
        the starting index position of each bag (sequence) in :attr:`input`.
    per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
        to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
        must have exactly the same shape as input and is treated as having the same
        :attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.

Returns:
    Tensor output shape of `(B, embedding_dim)`.

.. note::

    A few notes about ``input`` and ``offsets``:

    - :attr:`input` and :attr:`offsets` have to be of the same type, either int or long

    - If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences)
      each of fixed length ``N``, and this will return ``B`` values aggregated in a way
      depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case.

    - If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of
      multiple bags (sequences).  :attr:`offsets` is required to be a 1D tensor containing the
      starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets` of shape `(B)`,
      :attr:`input` will be viewed as having ``B`` bags. Empty bags (i.e., having 0-length) will have
      returned vectors filled by zeros.
        """
        return F.embedding_bag(
            input,
            self.weight,
            offsets,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.mode,
            self.sparse,
            per_sample_weights,
            self.include_last_offset,
            self.padding_idx,
        )

    def extra_repr(self) -> str:
        s = "{num_embeddings}, {embedding_dim}"
        if self.max_norm is not None:
            s += ", max_norm={max_norm}"
        if self.norm_type != 2:
            s += ", norm_type={norm_type}"
        if self.scale_grad_by_freq is not False:
            s += ", scale_grad_by_freq={scale_grad_by_freq}"
        s += ", mode={mode}"
        if self.padding_idx is not None:
            s += ", padding_idx={padding_idx}"
        return s.format(**{k: repr(v) for k, v in self.__dict__.items()})

    @classmethod
    def from_pretrained(
        cls,
        embeddings: Tensor,
        freeze: bool = True,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        mode: str = "mean",
        sparse: bool = False,
        include_last_offset: bool = False,
        padding_idx: Optional[int] = None,
    ) -> "EmbeddingBag":
        r"""Create EmbeddingBag instance from given 2-dimensional FloatTensor.

Args:
    embeddings (Tensor): FloatTensor containing weights for the EmbeddingBag.
        First dimension is being passed to EmbeddingBag as 'num_embeddings', second as 'embedding_dim'.
    freeze (bool, optional): If ``True``, the tensor does not get updated in the learning process.
        Equivalent to ``embeddingbag.weight.requires_grad = False``. Default: ``True``
    max_norm (float, optional): See module initialization documentation. Default: ``None``
    norm_type (float, optional): See module initialization documentation. Default ``2``.
    scale_grad_by_freq (bool, optional): See module initialization documentation. Default ``False``.
    mode (str, optional): See module initialization documentation. Default: ``"mean"``
    sparse (bool, optional): See module initialization documentation. Default: ``False``.
    include_last_offset (bool, optional): See module initialization documentation. Default: ``False``.
    padding_idx (int, optional): See module initialization documentation. Default: ``None``.

Examples::

    >>> # FloatTensor containing pretrained weights
    >>> weight = torch.FloatTensor([[1, 2.3, 3], [4, 5.1, 6.3]])
    >>> embeddingbag = nn.EmbeddingBag.from_pretrained(weight)
    >>> # Get embeddings for index 1
    >>> input = torch.LongTensor([[1, 0]])
    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
    >>> embeddingbag(input)
    tensor([[ 2.5000,  3.7000,  4.6500]])
        """
        assert (
            embeddings.dim() == 2
        ), "Embeddings parameter is expected to be 2-dimensional"
        rows, cols = embeddings.shape
        embeddingbag = cls(
            num_embeddings=rows,
            embedding_dim=cols,
            _weight=embeddings,
            max_norm=max_norm,
            norm_type=norm_type,
            scale_grad_by_freq=scale_grad_by_freq,
            mode=mode,
            sparse=sparse,
            include_last_offset=include_last_offset,
            padding_idx=padding_idx,
        )
        embeddingbag.weight.requires_grad = not freeze
        return embeddingbag