# torch/_ops.py — reconstructed from a compiled-bytecode (.pyc) dump of
# site-packages/torch/_ops.py. Docstrings and error strings that survived in
# the dump are kept verbatim; method bodies that could not be recovered are
# elided with `...` and summarized in comments.

import abc
import contextlib
import ctypes
import importlib
import inspect
import sys
import types
from typing import Any, Callable, Dict, List, Set, Type, Union

import torch
import torch.utils._pytree as pytree
from torch import _utils_internal
from torch._C import _dispatch_is_included_in_alias as is_included_in_alias
from torch._C import DispatchKey
from torch._functorch.pyfunctorch import dispatch_functorch
from torch.utils._python_dispatch import TorchDispatchMode

# Query the dlopen-flag hooks once; they do not exist on all platforms
# (e.g. Windows).
_SET_GLOBAL_FLAGS = hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags")


@contextlib.contextmanager
def dl_open_guard():
    """
    Context manager to set the RTLD_GLOBAL dynamic linker flag while we open a
    shared library to load custom operators.
    """
    if not _SET_GLOBAL_FLAGS:
        yield
        return
    old_flags = sys.getdlopenflags()
    sys.setdlopenflags(old_flags | ctypes.RTLD_GLOBAL)
    try:
        yield
    finally:
        sys.setdlopenflags(old_flags)


class OperatorBase:
    """
    Base class for OpOverload (which represents C++ ATen operators) and
    HigherOrderOperator (which represents Python-only operators that are
    unrepresentable in TorchScript).
    """

    def __init__(self):
        self._dispatch_cache = {}
        self.py_kernels = {}
        self.python_key_table = {}
        self.functorch_table = {}

    def __call__(self, *args, **kwargs):
        raise NotImplementedError

    def has_kernel_for_dispatch_key(self, k):
        return k in self.py_kernels

    def has_kernel_for_any_dispatch_key(self, ks):
        for k in self.py_kernels:
            if not torch._C._dispatch_is_alias_key(k) and ks.has(k):
                return True
        return False

    def py_impl(self, k):
        # Decorator registering a Python kernel for dispatch key, mode class,
        # or functorch TransformType `k`. Registering for DispatchKey.Python
        # directly raises "Please register a mode for the
        # torch._C.DispatchKey.Python key instead."; double registration
        # raises "Trying to override a python impl for ... on operator ...".
        ...

    def py_functionalize_impl(self, fn):
        # Wires `fn` up as the functionalization rule through the three
        # frontends: CppFunctionalizeAPI (DispatchKey.Functionalize),
        # PythonFunctionalizeAPI (FunctionalTensorMode), and
        # FunctorchFunctionalizeAPI (TransformType.Functionalize).
        ...

    def name(self):
        raise NotImplementedError


def resolve_key(op: OperatorBase, k: DispatchKey):
    # Resolves (op, k) to the dispatch key whose kernel should actually run.
    # Checks, in order: a direct kernel, CompositeExplicitAutogradNonFunctional,
    # CompositeExplicitAutograd, CompositeImplicitAutogradNestedTensor,
    # CompositeImplicitAutograd, AutogradOther (raising
    # "ambiguous autogradother kernel" on a conflicting backend kernel),
    # Autograd, FuncTorchBatchedDecomposition, and backend fallbacks. Raises
    # NotImplementedError("could not find kernel for ... at dispatch key ...")
    # when nothing matches.
    ...


_higher_order_ops: Dict[str, "HigherOrderOperator"] = {}

_HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS = [
    DispatchKey.PythonDispatcher,
    DispatchKey.PythonTLSSnapshot,
    DispatchKey.ADInplaceOrView,
    DispatchKey.BackendSelect,
    DispatchKey.AutocastCPU,
    DispatchKey.AutocastCUDA,
]


class HigherOrderOperator(OperatorBase, abc.ABC):
    def __init__(self, name):
        super().__init__()
        if type(self) is HigherOrderOperator:
            raise RuntimeError(
                "Direct instantiation of HigherOrderOperator is not allowed. "
                "Please subclass it."
            )
        self._name = name
        self.__name__ = name
        _higher_order_ops[name] = self
        self._ns = "higher_order"
        self.__module__ = "torch.ops.higher_order"
        self.non_fallthrough_keys = torch._C._dispatch_keyset_full()
        for dispatch_key in _HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS:
            self.fallthrough(dispatch_key)

    def py_impl(self, k):
        if isinstance(k, DispatchKey) and not self.non_fallthrough_keys.has(k):
            self.non_fallthrough_keys = self.non_fallthrough_keys.add(k)
        return super().py_impl(k)

    @property
    def namespace(self):
        return self._ns

    def fallthrough(self, dispatch_key):
        self.non_fallthrough_keys = self.non_fallthrough_keys.remove(dispatch_key)

    def dispatch(self, dispatch_key, *args, **kwargs):
        # Runs the registered rule for `dispatch_key`: functorch transforms,
        # Python modes and tensor subclasses (in __torch_dispatch__ order),
        # then PreDispatch, then cached per-key kernels. Among its error
        # strings: "There was no rule registered for HOP ... and mode ...",
        # "Multiple dispatch failed for ...", and "could not find kernel for
        # HigherOrderOperator ... at dispatch key ... (resolved from ...)".
        ...

    @abc.abstractmethod
    def __call__(self, *args, **kwargs):
        # The concrete implementation routes through torch.overrides'
        # torch-function handling, then self.dispatch() with the keyset from
        # _compute_keyset, wrapped in torch._dynamo.disable.
        ...

    def __str__(self):
        return f"{self.name()}"

    def name(self):
        return self._name


def _to_flat_tuple(args, kwargs):
    return pytree.arg_tree_leaves(*args, **kwargs)


def _compute_keyset(args, kwargs, non_fallthrough_keys):
    tensors = _get_tensors(args, kwargs)
    return key_extractor(tensors, non_fallthrough_keys)


def _get_tensors(args, kwargs):
    flat_all = _to_flat_tuple(args, kwargs)
    tensor_args = [t for t in flat_all if isinstance(t, torch.Tensor)]
    return tuple(tensor_args)


def key_extractor(tensors, key_mask):
    key_set = torch._C._dispatch_tls_local_include_set()
    for tensor in tensors:
        key_set = key_set | torch._C._dispatch_keys(tensor)
    key_set = key_set - torch._C._dispatch_tls_local_exclude_set()
    key_set = key_set & key_mask
    return key_set


# Mode-stack state used by the PreDispatch key: two infra-mode slots
# (proxy, functional) plus an optional SchemaCheckMode.
class _ModeStackStateForPreDispatch:
    def __init__(self):
        self.__infra_modes = [None, None]
        self._schema_check_mode = None

    def set(self, index, mode):
        assert index < len(self.__infra_modes)
        self.__infra_modes[index] = mode

    def get(self, index):
        assert index < len(self.__infra_modes)
        return self.__infra_modes[index]

    def count(self):
        return len([i for i in self.__infra_modes if i is not None]) + int(
            self._schema_check_mode is not None
        )


_mode_stack_state_for_pre_dispatch = _ModeStackStateForPreDispatch()


def unset_mode_pre_dispatch(mode_key, schema_check=False):
    # Pops the PROXY or FUNCTIONAL infra mode (or the schema-check mode) and
    # clears the PreDispatch TLS include bit once the stack is empty.
    ...


def _set_mode_pre_dispatch(mode):
    # Pushes a FunctionalTensorMode, ProxyTorchDispatchMode, or
    # SchemaCheckMode; "SchemaCheckMode for pre-dispatch must be used
    # exclusively, found other modes on the stack" otherwise.
    ...


def _pop_mode_from_pre_dispatch():
    # Raises AssertionError("Trying to pop empty mode stack") when empty.
    ...


def _len_torch_dispatch_stack_pre_dispatch():
    return mode_stack_state_for_pre_dispatch().count()


def _get_dispatch_mode_pre_dispatch(mode_key):
    assert mode_key in (
        torch._C._TorchDispatchModeKey.PROXY,
        torch._C._TorchDispatchModeKey.FUNCTIONAL,
    )
    if mode_key == torch._C._TorchDispatchModeKey.PROXY:
        return mode_stack_state_for_pre_dispatch().get(0)
    return mode_stack_state_for_pre_dispatch().get(1)


def _get_current_dispatch_mode_pre_dispatch():
    # Returns the schema-check mode if set, else the topmost infra mode.
    ...


def mode_stack_state_for_pre_dispatch():
    return _mode_stack_state_for_pre_dispatch


cached_ops: Set["OpOverload"] = set()


def add_cached_op(op_overload):
    cached_ops.add(op_overload)


def reset_cached_ops():
    cached_ops.clear()


def get_cached_ops():
    return cached_ops


class OpOverload(OperatorBase):
    def __init__(self, overloadpacket, op, op_dk, schema, tags):
        super().__init__()
        self._op = op
        self._op_dk = op_dk
        self._schema = schema
        self._overloadpacket = overloadpacket
        self._tags = tags
        self._overloadname = (
            "default" if schema.overload_name == "" else schema.overload_name
        )
        self._name = self._schema.name
        if schema.overload_name:
            self._name += "." + schema.overload_name
        self._lazy_handle = None
        # Also computed here (details not recoverable from the dump):
        # __module__/__qualname__ bookkeeping, _defined_in_python, and the
        # is_write/is_view flags derived from the schema's alias_info.
        ...

    @property
    def _namespace(self):
        return self._schema.name.split("::")[0]

    @property
    def _opname(self):
        return self._schema.name.split("::")[1]

    @property
    def _handle(self):
        if self._lazy_handle is None:
            self._lazy_handle = torch._C._dispatch_find_schema_or_throw(
                self._schema.name, self._schema.overload_name
            )
        return self._lazy_handle

    def __deepcopy__(self, memo=None):
        return self

    def __repr__(self):
        return "<OpOverload(op='{}.{}', overload='{}')>".format(
            *self._schema.name.split("::"), self._overloadname
        )

    def __call__(self, *args, **kwargs):
        return self._op(*args, **kwargs)

    def redispatch(self, keyset, *args, **kwargs):
        return self._handle.redispatch_boxed(keyset, *args, **kwargs)

    def __hash__(self):
        return hash(self._op)

    def __str__(self):
        return "{}.{}.{}".format(*self._schema.name.split("::"), self._overloadname)

    def has_kernel_for_dispatch_key(self, k):
        return super().has_kernel_for_dispatch_key(
            k
        ) or torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), k)

    def has_kernel_for_any_dispatch_key(self, ks):
        return torch._C._dispatch_has_kernel_for_any_dispatch_key(
            self.name(), ks
        ) or super().has_kernel_for_any_dispatch_key(ks)

    @property
    def namespace(self):
        return self._schema.name.split("::")[0]

    def _can_decompose(self):
        dk = DispatchKey.CompositeImplicitAutograd
        return dk in self.py_kernels or torch._C._dispatch_has_kernel_for_dispatch_key(
            self.name(), dk
        )

    def decompose(self, *args, **kwargs):
        dk = DispatchKey.CompositeImplicitAutograd
        if dk in self.py_kernels:
            return self.py_kernels[dk](*args, **kwargs)
        elif torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), dk):
            return self._op_dk(dk, *args, **kwargs)
        return NotImplemented

    def _uncache_dispatch(self, key):
        self._dispatch_cache.pop(key, None)

    def _get_dispatch(self, key):
        # Computes and caches the handler for `key`, covering
        # DispatchKey.Python (modes and TorchBindOpOverload handling),
        # PreDispatch (temporarily popping its mode stack), and the crossref
        # functionalization path; results are cached via add_cached_op.
        ...

    def name(self):
        return self._name

    @property
    def overloadpacket(self):
        return self._overloadpacket

    @property
    def op(self):
        return self._op

    @property
    def tags(self):
        return self._tags


class TorchBindOpOverload(OpOverload):
    # Overload whose schema has torch.ScriptObject arguments; it must
    # dispatch in Python when it sees a FakeScriptObject input (as happens
    # under export/compile).

    def _fallthrough_keys(self) -> List[DispatchKey]:
        # Default fallthrough keys (Autograd, AutogradCPU, AutogradCUDA,
        # ADInplaceOrView, BackendSelect, PythonTLSSnapshot,
        # PythonDispatcher), filtered to those that may use a fallthrough
        # instead of the fallback kernel.
        ...

    @contextlib.contextmanager
    def _register_as_effectful_op_temporarily(self):
        from torch._higher_order_ops.effects import (
            _EffectType,
            _register_effectful_op,
            SIDE_EFFECTS,
        )

        try:
            if self not in SIDE_EFFECTS:
                _register_effectful_op(self, _EffectType.ORDERED)
            yield
        finally:
            if self in SIDE_EFFECTS:
                del SIDE_EFFECTS[self]

    def __call__(self, *args, **kwargs):
        if _must_dispatch_in_python(args, kwargs):
            with self._register_as_effectful_op_temporarily():
                return self._dispatch_in_python(args, kwargs, self._fallthrough_keys())
        return self._op(*args, **kwargs)

    def _dispatch_in_python(self, args, kwargs, fallthrough_keys):
        # Walks the computed keyset minus the fallthrough keys; raises
        # RuntimeError("Torchbind op ... received a FakeScriptObject input
        # when dispatching ... but no python implementation is found. ...")
        # when no Python kernel is registered for the resolved key.
        ...


def _must_dispatch_in_python(args, kwargs):
    return pytree.tree_any(
        lambda obj: isinstance(
            obj, torch._library.fake_class_registry.FakeScriptObject
        ),
        (args, kwargs),
    )


def _has_script_object_arg(schema: torch.FunctionSchema) -> bool:
    return any(isinstance(arg.type, torch.ClassType) for arg in schema.arguments)


class OpOverloadPacket:
    def __init__(self, qualified_op_name, op_name, op, overload_names):
        self._qualified_op_name = qualified_op_name
        self.__name__ = op_name
        self._op = op
        self._overload_names = overload_names
        self._dir = []
        self._has_torchbind_op_overload = any(
            _has_script_object_arg(schema) for schema in self._schemas.values()
        )

    def __deepcopy__(self, memo=None):
        return self

    def __repr__(self):
        return "<OpOverloadPacket(op='{}.{}')>".format(
            *self._qualified_op_name.split("::")
        )

    def __hash__(self):
        return hash(self._op)

    def __str__(self):
        return "{}.{}".format(*self._qualified_op_name.split("::"))

    @property
    def op(self):
        return self._op

    @property
    def _schemas(self):
        return {
            overload_name: torch._C._get_schema(
                self._qualified_op_name, overload_name
            )
            for overload_name in self._overload_names
        }

    def __getattr__(self, key):
        # Resolves `packet.<overload_name>` ("default" maps to the empty
        # overload name) via torch._C._get_operation_overload, wraps it in an
        # OpOverload or TorchBindOpOverload, and caches it with setattr.
        # Dunder names are refused: "... can't have an overload name
        # beginning with '__' ...".
        ...

    def __iter__(self):
        return iter(self._dir)

    def __call__(self, *args, **kwargs):
        # Route through Python when a torchbind overload may receive a
        # FakeScriptObject input; otherwise call the C++ bound operation.
        if self._has_torchbind_op_overload and _must_dispatch_in_python(args, kwargs):
            return _call_overload_packet_from_python(self, args, kwargs)
        return self._op(*args, **(kwargs or {}))

    def overloads(self):
        return [n if n else "default" for n in self._overload_names]


def _call_overload_packet_from_python(op, args, kwargs):
    # Mimics the C++ dispatcher's overload resolution for packets that got a
    # FakeScriptObject input: first torch-function handling, then each
    # overload whose schema matches; on total failure raises "Fail to match
    # any TorchBindOverload of ... with following exceptions: ...".
    ...
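# Usage sketch (not part of the original module): how the classes above
# surface to user code. `torch.ops.aten.mul` is an OpOverloadPacket, and
# attribute access on it resolves a concrete OpOverload; the op names are
# standard ATen ops, the variable names are illustrative only.
#
#     import torch
#     packet = torch.ops.aten.mul            # OpOverloadPacket
#     overload = packet.Tensor               # OpOverload "aten::mul.Tensor"
#     x, y = torch.randn(3), torch.randn(3)
#     assert torch.equal(packet(x, y), overload(x, y))
#     print(overload.name(), packet.overloads())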
class _OpNamespace(types.ModuleType):
    """
    An op namespace to dynamically bind Operators into Python.

    Say a user has created a custom Operator called "my_namespace::my_op". To
    call this op, the user will write torch.ops.my_namespace.my_op(...). At
    startup, this operation will not yet be bound into Python. Instead, the
    following sequence of magic tricks will occur:

    1. `torch.ops.my_namespace` will invoke the `__getattr__` magic method on
       the `torch.ops` object, which will create a new `_OpNamespace` object
       called `my_namespace` and set it as an attribute on the `ops` object.
    2. `torch.ops.my_namespace.my_op` will then invoke `__getattr__` on the
       `my_namespace` object, which will retrieve the operation via
       `torch.get_operation`, a function bound from C++, and then in a similar
       fashion bind this new object onto the `my_namespace` object.
    3. `torch.ops.my_namespace.my_op(...)` then calls this new operation and
       subsequent accesses will incur no further lookup (the namespace and
       operation will already exist).
    """

    def __init__(self, name):
        super().__init__("torch.ops." + name)
        self.name = name
        self._dir = []

    def __iter__(self):
        return iter(self._dir)

    def __getattr__(self, op_name):
        # Resolves "<namespace>::<op_name>" through _get_packet, wraps the
        # result in an OpOverloadPacket, and caches it with setattr. Raises
        # AttributeError for '__origin__'/'__self__' ("Invalid attribute ...
        # for '_OpNamespace' ...") and for unknown ops ("'_OpNamespace' '...'
        # object has no attribute '...'").
        ...


def _get_packet(qualname, op_module):
    op, overload_names = torch._C._jit_get_operation(qualname)
    if op is not None:
        # Register the op for TorchScript so serialized code can find it.
        torch.jit._builtins._register_builtin(op, qualname)
        op.__module__ = op_module
    return op, overload_names


def _refresh_packet(packet):
    op, overload_names = _get_packet(packet._qualified_op_name, packet._op.__module__)
    assert op is not None
    packet._op = op
    packet._overload_names = overload_names


class _PyOpNamespace(_OpNamespace):
    def __init__(self, name, ops):
        super().__init__(name)
        self._ops = ops

    def __getattr__(self, name):
        op = self._ops.get(name, None)
        if op is None:
            raise AttributeError(
                f"'_PyOpNamespace' '{self.name}' object has no attribute '{name}'"
            )
        setattr(self, name, op)
        return op


class _Ops(types.ModuleType):
    __file__ = "_ops.py"

    def __init__(self):
        super().__init__("torch.ops")
        self.loaded_libraries = set()
        self._higher_order_op_namespace = _PyOpNamespace(
            "torch.ops.higher_order", _higher_order_ops
        )
        self._dir = []

    def __getattr__(self, name):
        if name == "higher_order":
            return self._higher_order_op_namespace
        # Create the namespace lazily on first access and cache it.
        namespace = _OpNamespace(name)
        setattr(self, name, namespace)
        self._dir.append(name)
        return namespace

    def __iter__(self):
        return iter(self._dir)

    def import_module(self, module):
        """
        Imports a Python module that has torch.library registrations.

        Generally, to extend PyTorch with custom operators, a user will
        create a Python module whose import triggers registration of
        the custom operators via a torch.ops.load_library call or a call
        to one or more torch.library.* APIs.

        It is unexpected for Python modules to have side effects, so some
        linters and formatters will complain. Use this API to import Python
        modules that contain these torch.library side effects.

        Args:
            module (str): The name of the Python module to import
        """
        importlib.import_module(module)

    def load_library(self, path):
        """
        Loads a shared library from the given path into the current process.

        The library being loaded may run global initialization code to
        register custom operators with the PyTorch JIT runtime. This allows
        dynamically loading custom operators. For this, you should compile
        your operator and the static registration code into a shared library
        object, and then call ``torch.ops.load_library('path/to/libcustom.so')``
        to load the shared object.

        After the library is loaded, it is added to the
        ``torch.ops.loaded_libraries`` attribute, a set that may be inspected
        for the paths of all libraries loaded using this function.

        Args:
            path (str): A path to a shared library to load.
        """
        if torch._running_with_deploy():
            return
        path = _utils_internal.resolve_library_path(path)
        with dl_open_guard():
            # Import the shared library into the process, running its static
            # initialization code, which registers the custom operators.
            ctypes.CDLL(path)
        self.loaded_libraries.add(path)


# The ops "namespace", accessible as torch.ops
ops: _Ops = _Ops()
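# Usage sketch for the loading APIs (the path and module names below are
# hypothetical; `torch.ops.load_library`, `torch.ops.loaded_libraries`, and
# `torch.ops.import_module` are the entry points documented above):
#
#     import torch
#     # Load a C++ extension whose static initializers register custom ops;
#     # dl_open_guard() applies RTLD_GLOBAL while the .so is dlopen'ed.
#     torch.ops.load_library("path/to/libcustom.so")
#     print(torch.ops.loaded_libraries)
#
#     # Or import a Python module whose torch.library calls register ops as
#     # an import side effect, then call the newly bound op:
#     torch.ops.import_module("my_pkg.custom_ops")
#     out = torch.ops.my_namespace.my_op(torch.randn(3))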