o �J�h��� @s�ddlZddlZddlZddlZddlmZddlmZmZmZm Z m Z m Z m Z m Z mZmZejr5ddlZddlmZddlmZddlmZmZmZe d�Ze eeed fgefZe eefZejZ d Z!d Z"d Z#d eedefdd�Z$dedeefdd�Z%dd�Z&ee eee eeeee eefe eeefZ'eeeefd fZ(eeeefZ)Gdd�d�Z*de*deede)de'fdd�Z+e�,d�e+�Z-de*d eded!e(def d"d#�Z.de*d eded!e(def d$d%�Z/e�,d&�d'ed(ed)eed fd*ede*f d+d,��Z0d'ed(ed)eed fde ee*ffd-d.�Z1d e eeefd'ed/ed!e def d0d1�Z2d e eeefd'ed!e defd2d3�Z3d e eeefd'ed!e defd4d5�Z4d6ed'ede5fd7d8�Z6d9d:�Z7eZ8d ede8fd;d<�Z9d=d>�Z:e�,d&�d'edefd?d@��Z;ej<d ed'edefdAdB��Z=ej<dCedDed'edefdEdB��Z=ej<dCedDedFed'edef dGdB��Z=ej<dCedDedFedHed'edef dIdB��Z=dJe eefdefdKdB�Z=dS)L�N)� OrderedDict) �Set�Tuple�List�Dict�Union�Callable�Optional�TypeVar�cast�Any�)� EinopsError)� get_backend)�ParsedExpression� _ellipsis� AnonymousAxis�Tensor.)�min�max�sum�mean�prod�any�alli����iay���sequence�returncCsd}|D]}||9}q|S)zSminimalistic product that works both with numbers and symbols. Supports empty listsr �)r�result�elementrr�KC:\pinokio\api\whisper-webui.git\app\env\lib\site-packages\einops\einops.py�_products r!�reduction_type� reduced_axescCsNt|�r ||t|��S|tvsJ�|dkr|�|�std��|�||t|��S)Nrz5reduce_mean is not available for non-floating tensors)�callable�tuple� _reductionsZ is_float_type�NotImplementedError�reduce)�tensorr"r#�backendrrr � _reduce_axes%s  r+c st��t��t��ksJ�tt����tt��d�ddd�D]G}�|d�|dkrh�|d���}�d����dd����d|9<�d|d�tdd��|dd�D���q!���fdd�}|�}tt��d�ddd�D]x}||dur�q�||ddur�q�||d||dkr�|d���}t�fdd�t��D��} t�fd d��D����d����dd����d|9<�} g�| D]} | | kr�q�| | kr��| �q݈�| d�q�|�}q����|fS) Nr �����css�|]}|dVqdS�r Nr��.0�axisrrr � <genexpr>?��z+_optimize_transformation.<locals>.<genexpr>�csTi}tt���D]}|�vrd||<qtdd�|��D��}t���|�||<q|S)Ncss�|]}|duVqdS�Nr�r/�xrrr r1Hr2zB_optimize_transformation.<locals>.build_mapping.<locals>.<genexpr>)�range�lenr�values�list�index)Z init_to_finalr0Zafter_reduction)�axes_reordering� init_shapesr#rr � build_mappingBs z/_optimize_transformation.<locals>.build_mappingc3s�|]}|�vVqdSr4rr5)r#rr r1Vr2c3s$�|] }|�kr |n|dVqdSr-rr.)� removed_axisrr r1Xs�")r8r%�sortedr7r�append) r=r#r<� final_shapes�iZremoved_lengthr>Zinit_axis_to_final_axisZ init_axisZremoved_axis_after_reductionZold_reorderingr0r)r<r=r#r?r �_optimize_transformation2sF  .�   � rDc@sjeZdZdZdeedeeefdeeeeeefdeededeeefdeeefd d �Z d S) �TransformRecipezi Recipe describes actual computation pathway. Recipe can be applied to a tensor or variable. �elementary_axes_lengths�axis_name2elementary_axis�input_composition_known_unknown�axes_permutation�first_reduced_axis� added_axes�output_composite_axescCs.||_||_||_||_||_||_||_dSr4�rFrGrHrIrJrKrL)�selfrFrGrHrIrJrKrLrrr �__init__{s zTransformRecipe.__init__N) �__name__� __module__� __qualname__�__doc__r�intr�strrrOrrrr rErs"� � � �� � �rErN�shape� axes_dimscsd}t|j��|D] \}}|�|j|<q t|j�D]{\}\}}||} t|�dkr7t|�dkr7| �|d<qd} |D]}| �|9} q;t|�dkrct| t�rbt| t�rb| | krbtd| �d| ����n&t| t�r}t| t�r}| | dkr}td| �d| ����|d} | | } | �| <t|�t|�dkr�d}q|r��d t|j ��nd } d}g}|j D]}�fd d �|D�}|� t |��t|�dkr�d}q��fd d �|j ��D�}tt|jt|j ���}t|�t|j �}|j }|j ttt|j ���kr�d }|r�|nd }| |||||fS)z� Reconstruct all actual parameters using shape. Shape is a tuple that may contain integers, shape symbols (tf, theano) and UnknownSize (tf, previously mxnet) known axes can be integers or symbols, but not Nones. Frr zShape mismatch, z != z,Shape mismatch, can't divide axis of length z in chunks of TNc�g|]}�|�qSrr)r/�elementary_axis�� axes_lengthsrr � <listcomp>��z4_reconstruct_from_shape_uncached.<locals>.<listcomp>csi|] \}}|�|�qSrr)r/�posZpos_in_elementaryrZrr � <dictcomp>�s�z4_reconstruct_from_shape_uncached.<locals>.<dictcomp>)r:rFrG� enumeraterHr8� isinstancerTrrIrLrAr!rK�itemsr7rJ)rNrVrWZneed_init_reshaper0�dimZ input_axisZ known_axesZ unknown_axes�lengthZ known_productZ unknown_axisZinferred_lengthr=Zneed_final_reshaperB�grouping�lengthsrKr#Zn_axes_after_adding_axesr<Z _final_shapesrrZr � _reconstruct_from_shape_uncached�sV    � �  � � rgi�reciper)r[c Cs�zt||�|�|�\}}}}} } Wnty+t||�|�|�} | \}}}}} } Ynw|dur6|�||�}|dur@|�||�}t|�dkrNt||||d�}t|�dkr\|j|| |d�}| durf|�|| �}|S)Nr)r"r#r*)Zn_axesZpos2len) �_reconstruct_from_shaperV� TypeErrorrg�reshape� transposer8r+Zadd_axes) r*rhr)r"r[r=r<r#rKrB�n_axes_w_added�_resultrrr � _apply_recipe�s& � �     rocCs�t||j|�\}}}}} } |dur|�||�}|dur!|�||�}t|�dkrDt|�r3||t|��}n|tvs9J�t||�|t|�d�}t|�dkrr|� �D] \} } |j || d�}qNt |j�} |� �D]\} } | | | <qc|� || �}| dur||�|| �}|S)Nr)r0) rirVrk� permute_dimsr8r$r%r&�getattrrb� expand_dimsr:� broadcast_to)�xprhr)r"r[r=r<r#rKrBrmZ axis_position� axis_length� final_shaperrr �_apply_recipe_array_api�s,�         rw��pattern� operation� axes_names�ndimc s(|�d�\}}t|��t|���js�jrtd�|����jr)�jr)td�|���|dkrM�js3�jr7td��t��j �j �}t |�dkrLtd�|���n_|dkr�t� �j �j �}t |�dkrftd �|���t� d d ��j D�h�j �|��}t |�dkr�td �|���n'|t vs�t |�r�t� �j �j �}t |�dkr�td �||���ntd�|t ����j�rDt �j�d}||kr�td|�d|�d���||} dd�t| �D�} g} �jD]} | tkr�| D]} | �| g�q�q�| �| �q�g}�jD]1} | tk�r| D]} |�| g�q�q�g}| D]} | tk�r|�| ��q |�| ��q |�|�q��j �| ��j �t��j�rC�j �| ��j �t�n|t �j�k�rZtdt �j��d|�d����j} �j}t��| D]} | D]}t|t��rx|j�|<�qit�|<�qi�qeg}�j D]}|�v�r�t|t��r�|j�|<nt�|<|�|��q�dd�t��D��|D]}t�|��s�td|��|�v�r�td�|���t�|<�q�g}t| �D]I\}} �fdd �| D�}�fdd �| D�}t |�dk�r�td�|���t |�t |�t | �k�sJ�|��fdd�|D��fdd�|D�f��q�i}tj| �D]}|�j v�r2t |�||<�q$�fdd�t|�D�}ttj| ���ttj|��}�fdd��D�}�fd d�|D�|}�fd!d�|D�}��fd"d�t|�D�}t |�t |�}t t��!���fd#d�|D�|||||d$�S)%z�Perform initial parsing of pattern and provided supplementary info axes_lengths is a tuple of tuples (axis_name, axis_length) �->z?Ellipsis found in right side, but not left side of a pattern {}z?Ellipsis inside parenthesis in the left side is not allowed: {}� rearrangezQNon-unitary anonymous axes are not supported in rearrange (exception is length 1)rzBIdentifiers only on one side of expression (should be on both): {}�repeatz5Unexpected identifiers on the left side of repeat: {}cSsh|] }t|t�s|�qSr)rar)r/�axrrr � <setcomp>?�z1_prepare_transformation_recipe.<locals>.<setcomp>z(Specify sizes for new axes in repeat: {}z9Unexpected identifiers on the right side of reduce {}: {}z'Unknown reduction {}. Expect one of {}.r zWrong shape: expected >=z dims. Received z -dim tensor.cSsg|]}tt|��qSr)rrU)r/rCrrr r\Psz2_prepare_transformation_recipe.<locals>.<listcomp>zWrong shape: expected cSsi|]\}}||�qSrr)r/�position�namerrr r_��z2_prepare_transformation_recipe.<locals>.<dictcomp>zInvalid name for an axisz Axis {} is not used in transformcsh|] }�|tkr|�qSr��_unknown_axis_lengthr.��axis_name2known_lengthrr r���csh|] }�|tkr|�qSrr�r.r�rr r��r�zCould not infer sizes for {}crXrrr.��axis_name2positionrr r\�r]cs"g|] \}}�fdd�|D��qS)crXrrr.r�rr r\�r]z=_prepare_transformation_recipe.<locals>.<listcomp>.<listcomp>r)r/rC�composite_axisr�rr r\�s�csg|] }|�jvr|�qSr�� identifiersr.)�rghtrr r\�r�csg|] }|�jvr|�qSrr�r.��leftrr r\�r�csg|]}��|��qSr)r;r.)�ordered_axis_leftrr r\�r�cs$i|]\}}|�jvr|�|�qSrr�)r/rC� axis_name)r�r�rr r_�s  �csi|]}|�|�qSrrr.r�rr r_�r�rM)"�splitr� has_ellipsisr�formatZhas_ellipsis_parenthesizedZhas_non_unitary_anonymous_axes�set�symmetric_differencer�r8� differencer&r$� compositionr7rrA�extend�update�removerrar�valuer�r`Zcheck_axis_name�_expected_axis_length� itertools�chainr:rEr9)ryrzr{r|�left_str�rght_strr�Zaxes_without_sizeZ n_other_dimsZ ellipsis_ndimZell_axesZleft_compositionr�r0Zrght_composition�groupr�Zrepeat_axes_namesrYZinput_axes_known_unknownrC�known�unknownZaxis_position_after_reductionZresult_axes_groupingZordered_axis_rghtr#Zorder_after_transpositionrIrKrJr)r�r�r�r�r�r �_prepare_transformation_recipe!s�     �  � � � �   �      �  �     �    "�  � � � �r�csT��d�\}}t|��t�j�g}�jr�fdd�td�D�}���fdd�|D�S)z� Internal function, used in layers. Layer makes all recipe creation when it is initialized, thus to keep recipes simple we pre-compute for all dims r}csg|] }t�j�d|�qS)r )r8r�)r/Z ellipsis_dimsr�rr r\��z1_prepare_recipes_for_all_dims.<locals>.<listcomp>�c si|] }|t���|d��qS))r|)r�)r/r|)r{rzryrr r_�r�z1_prepare_recipes_for_all_dims.<locals>.<dictcomp>)r�rr8r�r�r7)ryrzr{r�r��dimsr)r{r�rzryr �_prepare_recipes_for_all_dims�s  r�� reductionc Ks�zCt|t�rt|�dkrtd��t|d�}|�|�}nt|�}t|���}|�|�}t ||t|�t|�d�}t ||t t |�||d�WSt yv}z'd�||�} t|t�s^| d�|�7} n| d7} | d�|�7} t | d �|���d }~ww) a� einops.reduce combines rearrangement and reduction using reader-friendly notation. Some examples: ```python >>> x = np.random.randn(100, 32, 64) # perform max-reduction on the first axis # Axis t does not appear on RHS - thus we reduced over t >>> y = reduce(x, 't b c -> b c', 'max') # same as previous, but using verbose names for axes >>> y = reduce(x, 'time batch channel -> batch channel', 'max') # let's pretend now that x is a batch of images # with 4 dims: batch=10, height=20, width=30, channel=40 >>> x = np.random.randn(10, 20, 30, 40) # 2d max-pooling with kernel size = 2 * 2 for image processing >>> y1 = reduce(x, 'b c (h1 h2) (w1 w2) -> b c h1 w1', 'max', h2=2, w2=2) # same as previous, using anonymous axes, # note: only reduced axes can be anonymous >>> y1 = reduce(x, 'b c (h1 2) (w1 2) -> b c h1 w1', 'max') # adaptive 2d max-pooling to 3 * 4 grid, # each element is max of 10x10 tile in the original tensor. >>> reduce(x, 'b c (h1 h2) (w1 w2) -> b c h1 w1', 'max', h1=3, w1=4).shape (10, 20, 3, 4) # Global average pooling >>> reduce(x, 'b c h w -> b c', 'mean').shape (10, 20) # subtracting mean over batch for each channel; # similar to x - np.mean(x, axis=(0, 2, 3), keepdims=True) >>> y = x - reduce(x, 'b c h w -> 1 c 1 1', 'mean') # Subtracting per-image mean for each channel >>> y = x - reduce(x, 'b c h w -> b c 1 1', 'mean') # same as previous, but using empty compositions >>> y = x - reduce(x, 'b c h w -> b c () ()', 'mean') ``` Parameters: tensor: tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch). list of tensors is also accepted, those should be of the same type and shape pattern: string, reduction pattern reduction: one of available reductions ('min', 'max', 'sum', 'mean', 'prod', 'any', 'all'). Alternatively, a callable f(tensor, reduced_axes) -> tensor can be provided. This allows using various reductions like: np.max, np.nanmean, tf.reduce_logsumexp, torch.var, etc. axes_lengths: any additional specifications for dimensions Returns: tensor of the same type as input rz9Rearrange/Reduce/Repeat can't be applied to an empty list)r{r|)r"r[z2 Error while processing {}-reduction pattern "{}".z Input tensor shape: {}. z Input is list. zAdditional info: {}.z {}N)rar:r8rjrZstack_on_zeroth_dimensionr%rbrVr�ror rrr�) r)ryr�r[r*Zhashable_axes_lengthsrVrh�e�messagerrr r(�s,<      �  ��r(cK�t||fddi|��S)a� einops.rearrange is a reader-friendly smart element reordering for multidimensional tensors. This operation includes functionality of transpose (axes permutation), reshape (view), squeeze, unsqueeze, stack, concatenate and other operations. Examples: ```python # suppose we have a set of 32 images in "h w c" format (height-width-channel) >>> images = [np.random.randn(30, 40, 3) for _ in range(32)] # stack along first (batch) axis, output is a single array >>> rearrange(images, 'b h w c -> b h w c').shape (32, 30, 40, 3) # stacked and reordered axes to "b c h w" format >>> rearrange(images, 'b h w c -> b c h w').shape (32, 3, 30, 40) # concatenate images along height (vertical axis), 960 = 32 * 30 >>> rearrange(images, 'b h w c -> (b h) w c').shape (960, 40, 3) # concatenated images along horizontal axis, 1280 = 32 * 40 >>> rearrange(images, 'b h w c -> h (b w) c').shape (30, 1280, 3) # flattened each image into a vector, 3600 = 30 * 40 * 3 >>> rearrange(images, 'b h w c -> b (c h w)').shape (32, 3600) # split each image into 4 smaller (top-left, top-right, bottom-left, bottom-right), 128 = 32 * 2 * 2 >>> rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape (128, 15, 20, 3) # space-to-depth operation >>> rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape (32, 15, 20, 12) ``` When composing axes, C-order enumeration used (consecutive elements have different last axis). Find more examples in einops tutorial. Parameters: tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch). list of tensors is also accepted, those should be of the same type and shape pattern: string, rearrangement pattern axes_lengths: any additional specifications for dimensions Returns: tensor of the same type as input. If possible, a view to the original tensor is returned. r�r~�r(�r)ryr[rrr r~!s7r~cKr�)a� einops.repeat allows reordering elements and repeating them in arbitrary combinations. This operation includes functionality of repeat, tile, and broadcast functions. Examples for repeat operation: ```python # a grayscale image (of shape height x width) >>> image = np.random.randn(30, 40) # change it to RGB format by repeating in each channel >>> repeat(image, 'h w -> h w c', c=3).shape (30, 40, 3) # repeat image 2 times along height (vertical axis) >>> repeat(image, 'h w -> (repeat h) w', repeat=2).shape (60, 40) # repeat image 2 time along height and 3 times along width >>> repeat(image, 'h w -> (h2 h) (w3 w)', h2=2, w3=3).shape (60, 120) # convert each pixel to a small square 2x2. Upsample image by 2x >>> repeat(image, 'h w -> (h h2) (w w2)', h2=2, w2=2).shape (60, 80) # pixelate image first by downsampling by 2x, then upsampling >>> downsampled = reduce(image, '(h h2) (w w2) -> h w', 'mean', h2=2, w2=2) >>> repeat(downsampled, 'h w -> (h h2) (w w2)', h2=2, w2=2).shape (30, 40) ``` When composing axes, C-order enumeration used (consecutive elements have different last axis). Find more examples in einops tutorial. Parameters: tensor: tensor of any supported library (e.g. numpy.ndarray, tensorflow, pytorch). list of tensors is also accepted, those should be of the same type and shape pattern: string, rearrangement pattern axes_lengths: any additional specifications for dimensions Returns: Tensor of the same type as input. If possible, a view to the original tensor is returned. r�rr�r�rrr r[s/rr6c Csjt|dd�}t|��|�}|��rtd|�d|����t|�t|j�krG|jr=t|�t|j�dkr<td|�d|����n td|�d|����|jro|j�t �}|jd|�d gt|�t|j�d|j|dd�}n|j}i}t ||�D]9\}}t|�d kr�|dkr�td |�d|����qy|\} t | t �r�| d kr�||| <qy| j |kr�td |�d|����qy|S) a� Parse a tensor shape to dictionary mapping axes names to their lengths. ```python # Use underscore to skip the dimension in parsing. >>> x = np.zeros([2, 3, 5, 7]) >>> parse_shape(x, 'batch _ h w') {'batch': 2, 'h': 5, 'w': 7} # `parse_shape` output can be used to specify axes_lengths for other operations: >>> y = np.zeros([700]) >>> rearrange(y, '(b c h w) -> b c h w', **parse_shape(x, 'b _ h w')).shape (2, 10, 5, 7) ``` For symbolic frameworks may return symbols, not integers. Parameters: x: tensor of any supported framework pattern: str, space separated names for axes, underscore means skip axis Returns: dict, maps axes names to their lengths T��allow_underscorez'Can't parse shape with composite axes: � r z2Can't parse shape with this number of dimensions: z7Can't parse shape with different number of dimensions: N�_rzLength of axis is not 1: z)Length of anonymous axis does not match: )rrrVZhas_composed_axes� RuntimeErrorr8r�r�r;r�ziprarUr�) r6ry�exprV� ellipsis_idxr�r�axesrur0rrr � parse_shape�sD �  ��� � � �r�c Cs\t|�}|�|�}g}t|�D]\}}dgt|�}|||<|�|�|�d|�|��q|S)aa For an n-dimensional tensor, returns tensors to enumerate each axis. ```python x = np.zeros([2, 3, 4]) # or any other tensor i, j, k = _enumerate_directions(x) result = i + 2*j + 3*k ``` `result[i, j, k] = i + 2j + 3k`, and also has the same shape as result Works very similarly to numpy.ogrid (open indexing grid) r r)rrVr`r8rArk�arange)r6r*rVrZaxis_idrurrr �_enumerate_directions�s r�cCst|��|�S)z� Convert a tensor of an imperative framework (i.e. numpy/cupy/torch/jax/etc.) to `numpy.ndarray` Parameters: tensor: tensor of any known imperative framework Returns: `numpy.ndarray`, converted to numpy )r�to_numpy)r)rrr �asnumpy�s r�cCslt|�dkr td��t|�dkrtd��|d}t|t�r!td��t|�dkr+td��t|t�s4td��dS)Nrz2Singleton () axes are not yet supported in einsum.r z3Shape rearrangement is not yet supported in einsum.z/Anonymous axes are not yet supported in einsum.z&Encountered empty axis name in einsum.z%Axis name in einsum must be a string.)r8r'rarr�rU)r�rrr �_validate_einsum_axis_name�s     �r�cCs<d|vrtd��|�d�\}}dd�|�d�D�}t|dd�}tj}d}i}g}|D]>} d } | jD]1} | tkr<| d 7} q1t| �| d} | |vr\|t|�krRt d ��|||| <|d 7}| || 7} q1|� | �q*d� |�d} |jD](} | tkr~| d 7} qst| �| d} | |vr�t d | �d|�d���| || 7} qs| S)Nr}z!Einsum pattern must contain '->'.cSsg|] }t|ddd��qS)T)r�Zallow_duplicates)r)r/r�rrr r\ r�z2_compactify_pattern_for_einsum.<locals>.<listcomp>�,Tr�r�z...zToo many axes in einsum.r z Unknown axis z on right side of einsum �.) � ValueErrorr�r�string� ascii_lettersr�rr�r8r�rA�joinr)ryZ lefts_strZ right_strZlefts�rightZoutput_axis_namesrCZaxis_name_mappingZ left_patternsr�Z left_patternZ raw_axis_namer�Zcompact_patternrrr �_compactify_pattern_for_einsumsF      r�cC�dSr4r)r)ryrrr �einsum9�r��tensor1�tensor2cCr�r4r)r�r�ryrrr r�=r��tensor3cCr�r4r)r�r�r�ryrrr r�Ar��tensor4cCr�r4r)r�r�r�r�ryrrr r�Er��tensors_and_patterncGs\t|�dkr td��|d}t|t�std��|dd�}t|�}t|d�j|g|�R�S)a einops.einsum calls einsum operations with einops-style named axes indexing, computing tensor products with an arbitrary number of tensors. Unlike typical einsum syntax, here you must pass tensors first, and then the pattern. Also, note that rearrange operations such as `"(batch chan) out"`, or singleton axes `()`, are not currently supported. Examples: For a given pattern such as: ```python >>> x, y, z = np.random.randn(3, 20, 20, 20) >>> output = einsum(x, y, z, "a b c, c b d, a g k -> a b k") ``` the following formula is computed: ```tex output[a, b, k] = \sum_{c, d, g} x[a, b, c] * y[c, b, d] * z[a, g, k] ``` where the summation over `c`, `d`, and `g` is performed because those axes names do not appear on the right-hand side. Let's see some additional examples: ```python # Filter a set of images: >>> batched_images = np.random.randn(128, 16, 16) >>> filters = np.random.randn(16, 16, 30) >>> result = einsum(batched_images, filters, ... "batch h w, h w channel -> batch channel") >>> result.shape (128, 30) # Matrix multiplication, with an unknown input shape: >>> batch_shape = (50, 30) >>> data = np.random.randn(*batch_shape, 20) >>> weights = np.random.randn(10, 20) >>> result = einsum(weights, data, ... "out_dim in_dim, ... in_dim -> ... out_dim") >>> result.shape (50, 30, 10) # Matrix trace on a single tensor: >>> matrix = np.random.randn(10, 10) >>> result = einsum(matrix, "i i ->") >>> result.shape () ``` Parameters: tensors_and_pattern: tensors: tensors of any supported library (numpy, tensorflow, pytorch, jax). pattern: string, einsum pattern, with commas separating specifications for each tensor. pattern should be provided after all tensors. Returns: Tensor of the same type as input, after processing with einsum. r zd`einops.einsum` takes at minimum two arguments: the tensors (at least one), followed by the pattern.r,z^The last argument passed to `einops.einsum` must be a string, representing the einsum pattern.Nr)r8r�rarUr�rr�)r�ry�tensorsrrr r�Is @� � )>� functoolsr�r��typing� collectionsrrrrrrrr r r r � TYPE_CHECKING�numpy�npr�r� _backendsr�parsingrrrrrTZReductionCallablerU� Reduction�Sizer&r�r�r!r+rDZ CookedRecipeZHashableAxesLengthsZFakeHashableAxesLengthsrErg� lru_cacherirorwr�r�r(r~r�dictr�r�Z np_ndarrayr�r�r��overloadr�rrrr �<module>s� 0    87)��� �H���� ����� �"�� �� ��� �  �*&U&:2? 5 $
Memory