diff --git a/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/requirements.txt b/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/requirements.txt
index 190b531c7b7..dd5467b9c61 100644
--- a/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/requirements.txt
+++ b/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/smooth_quant/requirements.txt
@@ -9,6 +9,6 @@ wandb
 einops
 neural-compressor
 lm_eval <= 0.4.7
-peft
+peft <= 0.17.0
 optimum-intel
 intel_extension_for_pytorch == 2.7.0
diff --git a/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/requirements.txt b/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/requirements.txt
index b943752bd55..9af5428c37d 100644
--- a/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/requirements.txt
+++ b/examples/pytorch/nlp/huggingface_models/language-modeling/quantization/static_quant/ipex/requirements.txt
@@ -9,5 +9,5 @@ wandb
 einops
 neural-compressor
 lm_eval <= 0.4.7
-peft
+peft <= 0.17.0
 intel_extension_for_pytorch