from transformers import TrainingArguments
training_args = TrainingArguments("test-trainer")
TrainingArguments raises an error; the error message is as follows:
ImportError Traceback (most recent call last)
in <cell line: 3>()
1 from transformers import TrainingArguments
2
----> 3 training_args = TrainingArguments("test-trainer")
4 frames
/usr/local/lib/python3.10/dist-packages/transformers/training_args.py in __init__(self, output_dir, overwrite_output_dir, do_train, do_eval, do_predict, evaluation_strategy, prediction_loss_only, per_device_train_batch_size, per_device_eval_batch_size, per_gpu_train_batch_size, per_gpu_eval_batch_size, gradient_accumulation_steps, eval_accumulation_steps, eval_delay, learning_rate, weight_decay, adam_beta1, adam_beta2, adam_epsilon, max_grad_norm, num_train_epochs, max_steps, lr_scheduler_type, warmup_ratio, warmup_steps, log_level, log_level_replica, log_on_each_node, logging_dir, logging_strategy, logging_first_step, logging_steps, logging_nan_inf_filter, save_strategy, save_steps, save_total_limit, save_safetensors, save_on_each_node, no_cuda, use_mps_device, seed, data_seed, jit_mode_eval, use_ipex, bf16, fp16, fp16_opt_level, half_precision_backend, bf16_full_eval, fp16_full_eval, tf32, local_rank, ddp_backend, tpu_num_cores, tpu_metrics_debug, debug, dataloader_drop_last, eval_steps, dataloader_num_workers, past_index, run_name, disable_tqdm, remove_unused_columns, label_names, load_best_model_at_end, metric_for_best_model, greater_is_better, ignore_data_skip, sharded_ddp, fsdp, fsdp_min_num_params, fsdp_config, fsdp_transformer_layer_cls_to_wrap, deepspeed, label_smoothing_factor, optim, optim_args, adafactor, group_by_length, length_column_name, report_to, ddp_find_unused_parameters, ddp_bucket_cap_mb, dataloader_pin_memory, skip_memory_met…
/usr/local/lib/python3.10/dist-packages/transformers/training_args.py in __post_init__(self)
   1338             self.framework == "pt"
   1339             and is_torch_available()
-> 1340             and (self.device.type != "cuda")
   1341             and (get_xla_device_type(self.device) != "GPU")
   1342             and (self.fp16 or self.fp16_full_eval)
/usr/local/lib/python3.10/dist-packages/transformers/training_args.py in device(self)
   1762         """
   1763         requires_backends(self, ["torch"])
-> 1764         return self._setup_devices
   1765
   1766     @property
/usr/local/lib/python3.10/dist-packages/transformers/utils/generic.py in __get__(self, obj, objtype)
     52         cached = getattr(obj, attr, None)
     53         if cached is None:
---> 54             cached = self.fget(obj)
     55             setattr(obj, attr, cached)
     56         return cached
/usr/local/lib/python3.10/dist-packages/transformers/training_args.py in _setup_devices(self)
   1670         if not is_sagemaker_mp_enabled():
   1671             if not is_accelerate_available(min_version="0.20.1"):
-> 1672                 raise ImportError(
   1673                     "Using the `Trainer` with `PyTorch` requires `accelerate>=0.20.1`: Please run `pip install transformers[torch]` or `pip install accelerate -U`"
   1674                 )
ImportError: Using the `Trainer` with `PyTorch` requires `accelerate>=0.20.1`: Please run `pip install transformers[torch]` or `pip install accelerate -U`
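
The failing check in the last frame is is_accelerate_available(min_version="0.20.1"), so you can confirm the cause before changing anything by inspecting what is actually installed. A minimal diagnostic sketch (this snippet is not from the original post; it assumes you run it in the same environment as the failing cell):

# Check whether accelerate is installed, and at which version;
# the Trainer in this transformers release needs accelerate>=0.20.1.
try:
    import accelerate
    print("accelerate", accelerate.__version__)
except ImportError:
    print("accelerate is not installed")  # exactly the condition that raises the ImportError above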
Solution
This transformers release requires accelerate>=0.20.1 when using the Trainer with PyTorch, so TrainingArguments fails during device setup if accelerate is missing or too old. One fix is to downgrade transformers to a version that does not enforce this check:
pip install transformers==4.24.0
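
Alternatively, the error message itself points to a fix that keeps your current transformers version: install or upgrade accelerate, e.g. with pip install accelerate -U or pip install transformers[torch]. Whichever route you take, a quick sanity check afterwards (a sketch; the restart advice is an assumption based on the Colab-style dist-packages paths in the traceback):

# After installing, restart the runtime if you are in a notebook (e.g. Colab),
# then verify the loaded version and that the constructor works again.
import transformers
print(transformers.__version__)  # e.g. 4.24.0 after the downgrade

from transformers import TrainingArguments
training_args = TrainingArguments("test-trainer")  # should no longer raise ImportError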