import importlib
import importlib.metadata
import os
import sys
import warnings
from functools import lru_cache, wraps

import torch
from packaging import version
from packaging.version import parse

from .environment import parse_flag_from_env, patch_environment, str_to_bool
from .versions import compare_versions, is_torch_version

# Allow users to opt out of torch_xla even when it is installed.
USE_TORCH_XLA = parse_flag_from_env("USE_TORCH_XLA", default=True)
ZdZerczd dlm  mZ d dlZd	ZW n	 eyb   Y nw eZej Z dddZ!de"fddZ#dd Z$dd Z%dd Z&dd Z'dd Z(dd Z)dd Z*dd  Z+d!d" Z,d#d$ Z-d%d& Z.d'd( Z/edd)d*Z0d+d, Z1d-d. Z2d/d0 Z3dd1d2Z4d3d4 Z5d5d6 Z6d7d8 Z7d9d: Z8dd;d<Z9d=d> Z:d?d@ Z;dAdB Z<dCdD Z=dEdF Z>dGdH Z?dIdJ Z@dKdL ZAdMdN ZBdOdP ZCdQdR ZDdSdT ZEdUdV ZFdWdX ZGdYdZ ZHd[d\ ZId]d^ ZJd_d` ZKdadb ZLdcdd ZMdedf ZNdgdh ZOddjdkZPdldm ZQeddndoZReddpdqZSeddrdsZTeddtduZUeddvdwZVdxdy ZWeddzd{ZXd|d} ZYd~d ZZdd Z[dd Z\dd Z]dd Z^dddZ_dS )    N)	lru_cachewraps)version)parse   )parse_flag_from_envpatch_environmentstr_to_bool)compare_versionsis_torch_versionUSE_TORCH_XLAT)defaultFc                 C   sP   t j| d u}|r&zt j|d u r| n|}W dS  t jjy%   Y dS w d S )NTF)	importlibutil	find_specmetadataPackageNotFoundError)pkg_namemetadata_namepackage_exists_ r   Z/sda-disk/www/egybert/egybert_env/lib/python3.10/site-packages/accelerate/utils/imports.py_is_package_available2   s   r   returnc                   C   s   t S N)_torch_distributed_availabler   r   r   r   is_torch_distributed_available>   s   r   c                   C   s$   t ddrtjj S t rdS dS )N>=z2.7.0F)r   torchdistributeddistributed_c10dis_xccl_availableis_ipex_availabler   r   r   r   r"   B   s
   
r"   c                   C   s0   zW n t y   td Y nw tjdd uS )NzIntel(R) oneCCL Bindings for PyTorch* is required to run DDP on Intel(R) XPUs, but it is not detected. If you see "ValueError: Invalid backend: 'ccl'" error, please install Intel(R) oneCCL Bindings for PyTorch*.oneccl_bindings_for_pytorch)ImportErrorprintr   r   r   r   r   r   r   is_ccl_availableJ   s   r'   c                   C   s   t jdS )Noneccl_bind_pt)r   r   r   r   r   r   r   get_ccl_versionV   s   r)   c                   C      t dS )Nimport_timerr   r   r   r   r   is_import_timer_availableZ      r-   c                   C   s   t dpt ddS )Npynvmlznvidia-ml-pyr,   r   r   r   r   is_pynvml_available^      r0   c                   C   r*   )Npytestr,   r   r   r   r   is_pytest_availableb   r.   r3   c                   C   
   t ddS )Nmsampzms-ampr,   r   r   r   r   is_msamp_availablef      
r6   c                   C   r*   )Nschedulefreer,   r   r   r   r   is_schedulefree_availablej   r.   r9   c                   C   s   t  rtddS tddS )Nintel_transformer_enginezintel-transformer-enginetransformer_enginetransformer-engine)is_hpu_availabler   r   r   r   r   is_transformer_engine_availablen   s   

r>   c                  C   s(   t ddrdd lm}  | j d S dS )Nr;   r<   r   F)r   transformer_engine.pytorchpytorchfp8check_mxfp8_support)ter   r   r   %is_transformer_engine_mxfp8_availableu   s   
rD   c                   C   r*   )N
lomo_optimr,   r   r   r   r   is_lomo_available}   r.   rF   c                  C   s:   t dd tj } W d   | S 1 sw   Y  | S )z
    Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda
    uninitialized.
    1)PYTORCH_NVML_BASED_CUDA_CHECKN)r   r   cudais_available)	availabler   r   r   is_cuda_available   s   
rL   c                 C   s@   | r|rJ dt sdS |rtj dv S | rtj dkS dS )z
    Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set
    the USE_TORCH_XLA to false.
    z6The check_is_tpu and check_is_gpu cannot both be true.F)GPUCUDATPUT)_torch_xla_available	torch_xlaruntimedevice_type)check_is_tpucheck_is_gpur   r   r   is_torch_xla_available   s   rV   c                  C   .   t d} | rttjd}t|ddS dS )Ntorchaor   z0.6.1Fr   r   r   r   r   r
   )r   torchao_versionr   r   r   is_torchao_available   
   r[   c                   C   r*   )N	deepspeedr,   r   r   r   r   is_deepspeed_available   r.   r^   c                   C   r4   Nr   z2.4.0r   r   r   r   r   is_pippy_available   r7   ra   c                 C   sZ   t ddr|  S t rtj S t rtj S t r tj S t	 r+tj
jddS dS )z8Checks if bf16 is supported, optionally ignoring the TPUT)rT      r   )rV   rL   r   rI   is_bf16_supportedis_mlu_availablemluis_xpu_availablexpuis_mps_availablebackendsmpsis_macos_or_newer)
ignore_tpur   r   r   is_bf16_available   s   



rm   c                   C   s   t  rdS dS )zChecks if fp16 is supportedFT)is_habana_gaudi1r   r   r   r   is_fp16_available   s   ro   c                   C   s   t  pt pt S )zChecks if fp8 is supported)r6   r>   r[   r   r   r   r   is_fp8_available   s   rp   c                  C   rW   )Nbitsandbytesr   z0.39.0FrY   r   bnb_versionr   r   r   is_4bit_bnb_available   r\   rt   c                  C   rW   )Nrq   r   z0.37.2FrY   rr   r   r   r   is_8bit_bnb_available   r\   ru   c                 C   s6   t d}|r| d urttjd}t|d| S |S )Nrq   r   rY   )min_versionr   rs   r   r   r   is_bnb_available   s
   rw   c                  C   s$   t  sdS dd l} dt| dt v S )NFr   multi_backendfeatures)rw   rq   getattrset)bnbr   r   r   'is_bitsandbytes_multi_backend_available   s   r}   c                   C   r*   )Ntorchvisionr,   r   r   r   r   is_torchvision_available   r.   r   c               
   C   s   t tjdddkrHtjdd urJzttj	d} t
| ddr*tjddW S W d S  tyG } ztd	|  W Y d }~d
S d }~ww d S d S )NACCELERATE_USE_MEGATRON_LMFalser   megatronzmegatron-corer   0.8.0z	.trainingz)Parse Megatron version failed. Exception:F)r	   osenvirongetr   r   r   r   r   r   r
   	Exceptionwarningswarn)megatron_versioner   r   r   is_megatron_lm_available   s   r   c                   C   r*   )Ntransformersr,   r   r   r   r   is_transformers_available   r.   r   c                   C   r*   )Ndatasetsr,   r   r   r   r   is_datasets_available   r.   r   c                   C   r*   )Npeftr,   r   r   r   r   is_peft_available  r.   r   c                   C   r*   )Ntimmr,   r   r   r   r   is_timm_available  r.   r   c                   C   s   t  rtddS tdS )Ntritonzpytorch-triton-xpu)rf   r   r   r   r   r   is_triton_available  s   
r   c                  C   rW   )Naim<z4.0.0FrY   )r   aim_versionr   r   r   is_aim_available  r\   r   c                   C   s   t dpt dS )NtensorboardtensorboardXr,   r   r   r   r   is_tensorboard_available  s   r   c                   C   r*   )Nwandbr,   r   r   r   r   is_wandb_available  r.   r   c                   C   r*   )Ncomet_mlr,   r   r   r   r   is_comet_ml_available!  r.   r   c                   C   r*   )Nswanlabr,   r   r   r   r   is_swanlab_available%  r.   r   c                   C   s   t jdkotdS )N)   
   trackio)sysversion_infor   r   r   r   r   is_trackio_available)  r1   r   c                   C   r*   )Nboto3r,   r   r   r   r   is_boto3_available-  r.   r   c                   C   s   t dr	tddS dS )NrichACCELERATE_ENABLE_RICHF)r   r   r   r   r   r   is_rich_available1  s   
r   c                   C   r*   )N	sagemakerr,   r   r   r   r   is_sagemaker_available7  r.   r   c                   C   r*   )Ntqdmr,   r   r   r   r   is_tqdm_available;  r.   r   c                   C   r*   )Nclearmlr,   r   r   r   r   is_clearml_available?  r.   r   c                   C   r*   )Npandasr,   r   r   r   r   is_pandas_availableC  r.   r   c                   C   r*   )N
matplotlibr,   r   r   r   r   is_matplotlib_availableG  r.   r   c                  C   sL   t drdS tjdd ur$z	tjd} W dS  tjjy#   Y dS w dS )NmlflowTzmlflow-skinnyF)r   r   r   r   r   r   )r   r   r   r   is_mlflow_availableK  s   r   1.12c                 C   s"   t d| otjj otjj S )zHChecks if MPS device is available. The minimum version required is 1.12.r   )r   r   ri   rj   rJ   is_built)rv   r   r   r   rh   X  s   "rh   c               	   C   s   dd } t jd}t jddu rdS d}zt jd}W n t jjy*   Y dS w | |}| |}||krHtd| d	| d
| d dS dS )zChecks if ipex is installed.c                 S   s$   t t| jd t t| j S )N.)strr   r   majorminor)full_versionr   r   r    get_major_and_minor_from_versionb  s   $z;is_ipex_available.<locals>.get_major_and_minor_from_versionr   intel_extension_for_pytorchNFzN/AzIntel Extension for PyTorch z needs to work with PyTorch z.*, but PyTorch z? is found. Please switch to the matching version and run again.T)r   r   r   r   r   r   r   r   )r   _torch_version_ipex_versiontorch_major_and_minoripex_major_and_minorr   r   r   r#   _  s(   r#   c                 C   sV   t jddu r
dS ddl}tdd tj }W d   |S 1 s$w   Y  |S )z
    Checks if `mlu` is available via an `cndev-based` check which won't trigger the drivers and leave mlu
    uninitialized.
    	torch_mluNFr   rG   )PYTORCH_CNDEV_BASED_MLU_CHECK)r   r   r   r   r   r   re   rJ   )check_devicer   rK   r   r   r   rd   x  s   
rd   c                 C   `   t jddu r
dS ddl}| r&ztj }tj W S  ty%   Y dS w t	tdo/tj S )zSChecks if `torch_musa` is installed and potentially if a MUSA is in the environment
torch_musaNFr   musa)
r   r   r   r   r   r   device_countrJ   RuntimeErrorhasattr)r   r   r   r   r   r   is_musa_available     
r   c                 C   sz   t jddu r
dS zddl}W n
 ty   Y dS w | r3ztj }tj W S  t	y2   Y dS w t
tdo<tj S )zQChecks if `torch_npu` is installed and potentially if a NPU is in the environment	torch_npuNFr   npu)r   r   r   r   r   r   r   r   rJ   r   r   )r   r   r   r   r   r   is_npu_available  s   
r   c                 C   r   )zSChecks if `torch_sdaa` is installed and potentially if a SDAA is in the environment
torch_sdaaNFr   sdaa)
r   r   r   r   r   r   r   rJ   r   r   )r   r   r   r   r   r   is_sdaa_available  r   r   c                 C   s\   t jddu st jddu rdS ddl}| r$ddlm  m  m} ttdo-tj	
 S )zQChecks if `torch.hpu` is installed and potentially if a HPU is in the environmenthabana_frameworksNzhabana_frameworks.torchFr   hpu)r   r   r   habana_frameworks.torch(habana_frameworks.torch.distributed.hcclr   r    hcclr   r   rJ   )	init_hcclr   r   r   r   r   r=     s   r=   c                  C   s6   t  rdd lm  m  m}  |  | jjkrdS dS )Nr   TF)r=   *habana_frameworks.torch.utils.experimentalr   utilsexperimental_get_device_typesynDeviceTypesynDeviceGaudi)htexpr   r   r   rn     s
   rn   c                 C   sb   t  rddl}ntddrdS | r'ztj }tj W S  ty&   Y dS w ttdo0tj S )z
    Checks if XPU acceleration is available either via `intel_extension_for_pytorch` or via stock PyTorch (>=2.4) and
    potentially if a XPU is in the environment
    r   Nz<=z2.3Frg   )	r#   r   r   r   rg   r   rJ   r   r   )r   r   r   r   r   r   rf     s   


rf   c                   C   r*   )Ndvcliver,   r   r   r   r   is_dvclive_available  r.   r   c                   C   r*   )N	torchdatar,   r   r   r   r   is_torchdata_available  r.   r   c                  C   rW   )Nr   r   r   FrY   )r   torchdata_versionr   r   r   *is_torchdata_stateful_dataloader_available  r\   r   c                       t   fdd}|S )zc
    A decorator that ensures the decorated function is only called when torchao is available.
    c                     s   t  std | i |S )Nze`torchao` is not available, please install it before calling this function via `pip install torchao`.)r[   r%   )argskwargsfuncr   r   wrapper  s
   z!torchao_required.<locals>.wrapperr   r   r   r   r   r   torchao_required  s   r   c                    r   )zc
    A decorator that ensures the decorated function is only called when deepspeed is enabled.
    c                     sF   ddl m} ddlm} |ji kr| j|jkrtd | i |S )Nr   )AcceleratorState)DistributedTypez|DeepSpeed is not enabled, please make sure that an `Accelerator` is configured for `deepspeed` before calling this function.)accelerate.stater   accelerate.utils.dataclassesr   _shared_statedistributed_type	DEEPSPEED
ValueError)r   r   r   r   r   r   r   r     s   z#deepspeed_required.<locals>.wrapperr   r   r   r   r   deepspeed_required  s   r   c                   C   r4   r_   r`   r   r   r   r   is_weights_only_available,  s   
r   1.25.0c                 C   s   t tjd}t|d| S )Nnumpyr   )r   r   r   r   r
   )rv   numpy_versionr   r   r   is_numpy_available2  s   r  r   )FF)F)r   )r   )`r   importlib.metadatar   r   r   	functoolsr   r   r   	packagingr   packaging.versionr   environmentr   r   r	   versionsr
   r   r   rP   torch_xla.core.xla_modelcore	xla_modelxmtorch_xla.runtimerQ   r%   _tpu_availabler    rJ   r   r   boolr   r"   r'   r)   r-   r0   r3   r6   r9   r>   rD   rF   rL   rV   r[   r^   ra   rm   ro   rp   rt   ru   rw   r}   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   rh   r#   rd   r   r   r   r=   rn   rf   r   r   r   r   r   r   r  r   r   r   r   <module>   s   



	


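
# ---------------------------------------------------------------------------
# Usage sketch. This block is an illustrative addition, not part of the
# module's API: it shows one plausible way the availability checks and the
# `torchao_required` decorator above are consumed downstream. The function
# name `demo_quantize` and the chosen version floor are hypothetical, used
# only for this example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Feature-gate an optional dependency instead of importing it eagerly.
    if is_bnb_available(min_version="0.39.0"):
        print("bitsandbytes >= 0.39.0 detected: 4-bit quantization is usable")

    # Pick a mixed-precision mode appropriate for the detected accelerator.
    mixed_precision = "bf16" if is_bf16_available() else ("fp16" if is_fp16_available() else "no")
    print(f"suggested mixed precision: {mixed_precision}")

    # Guard an API that only makes sense with torchao installed; calling
    # `demo_quantize` without torchao raises a descriptive ImportError.
    @torchao_required
    def demo_quantize():
        print("torchao is available, quantizing...")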