Modified from the code provided by Hugging Face:
https://github.com/huggingface/transformers


step1: Install the required packages

pip3 install -r requirements.txt

-------------------------------------------------------------------------------------------------------------
step2: Execute the following command


DeBERTa-QG:

===== Training the QG model ===== 

python3 QG_train.py \
  --model_type deberta \
  --model_name_or_path microsoft/deberta-base \
  --do_train \
  --train_file data/race_train.json \
  --per_gpu_train_batch_size 8 \
  --learning_rate 4e-5 \
  --num_train_epochs 10 \
  --logging_steps 100 \
  --save_steps -1  \
  --max_seq_length 512 \
  --doc_stride 450 \
  --max_query_length 42 \
  --max_answer_length 16 \
  --output_dir DeBERTa_base_QG_race/ \
  --warmup_steps 1000 

===== Evaluate the QG model by testing data ===== 

python3 QG_prediction.py \
  --model_type deberta \
  --model_name_or_path DeBERTa_base_QG_race/epoch-9/ \
  --tokenizer_name microsoft/deberta-base \
  --predict_file  data/race_test.json \
  --max_seq_length 512 \
  --doc_stride 450 \
  --max_query_length 42 \
  --max_answer_length 16 \
  --beam_size 3 \
  --output_dir DeBERTa_base_QG_race/epoch-9/

===== Inference the QG model ===== 

python3 DeBERTa_QG_prediction.py \
  --model_type deberta \
  --model_name_or_path DeBERTa_base_QG_race/epoch-9/ \
  --tokenizer_name microsoft/deberta-base \
  --max_seq_length 512 \
  --doc_stride 450 \
  --max_query_length 42 \
  --max_answer_length 16 \
  --beam_size 3


DeBERTa-KPQG:

===== Train the KPQG model ===== 

 python3 KPQG_train.py \
  --model_type deberta \
  --model_name_or_path microsoft/deberta-base \
  --do_train \
  --train_file data/race_train.json \
  --per_gpu_train_batch_size 8 \
  --learning_rate 4e-5 \
  --num_train_epochs 10 \
  --logging_steps 100 \
  --save_steps -1 \
  --max_seq_length 512 \
  --doc_stride 450 \
  --max_query_length 42 \
  --max_answer_length 16 \
  --output_dir  DeBERTa_base_KPQG_race/ \
  --warmup_steps 1000


===== Evaluate the KPQG model by noun keywords of testing data ===== 

python3 DeBERTa_KPQG_prediction.py \
  --model_type deberta \
  --model_name_or_path DeBERTa_base_KPQG_race/epoch-9/ \
  --tokenizer_name microsoft/deberta-base \
  --predict_file  data/race_test.json \
  --eval_type noun_keywords \
  --max_seq_length 512 \
  --doc_stride 450 \
  --max_query_length 42 \
  --max_answer_length 16 \
  --beam_size 3 \
  --output_dir DeBERTa_base_KPQG_race/epoch-9/ 


===== Inference the KPQG model ===== 

python3 DeBERTa_KPQG_prediction.py \
  --model_type deberta \
  --model_name_or_path DeBERTa_base_KPQG_race/epoch-9/ \
  --tokenizer_name microsoft/deberta-base \
  --max_seq_length 512 \
  --doc_stride 450 \
  --max_query_length 42 \
  --max_answer_length 16 \
  --beam_size 3
