trec-covid:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.5947 | 0.1091 |
| BM25 (multifield) | 0.6559 | 0.1141 |
| SPLADE++ (EnsembleDistil) | 0.7274 | 0.1282 |
| Contriever (MS MARCO) | 0.5964 | 0.0907 |
| BGE-base-en-v1.5 | 0.7815 | 0.1406 |

Each table in this section lists nDCG@10 and R@100 per model; the evaluation commands also compute R@1000, which is not shown in the tables.
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-trec-covid.flat \
  --topics beir-v1.0.0-trec-covid-test \
  --output run.beir.bm25-flat.trec-covid.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
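The run file is in standard six-column TREC format (qid, Q0, docid, rank, score, tag), as requested by --output-format trec. A quick spot-check before evaluating (sketch):

head -3 run.beir.bm25-flat.trec-covid.txt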
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
  run.beir.bm25-flat.trec-covid.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-covid-test \
  run.beir.bm25-flat.trec-covid.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-covid-test \
  run.beir.bm25-flat.trec-covid.txt
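The three evaluation commands above differ only in the metric flag, so they can be collapsed into one loop (a convenience sketch, not part of the official commands):

for metric in ndcg_cut.10 recall.100 recall.1000; do
  python -m pyserini.eval.trec_eval \
    -c -m ${metric} beir-v1.0.0-trec-covid-test \
    run.beir.bm25-flat.trec-covid.txt
done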
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-trec-covid.multifield \
  --topics beir-v1.0.0-trec-covid-test \
  --output run.beir.bm25-multifield.trec-covid.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
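The --fields flag assigns per-field BM25 weights; this configuration weights contents and title equally. To probe sensitivity to the title field, one could re-run with a different boost (a hypothetical variant with its own output file, not the documented configuration):

python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-trec-covid.multifield \
  --topics beir-v1.0.0-trec-covid-test \
  --output run.beir.bm25-mf-title2.trec-covid.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=2.0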
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
  run.beir.bm25-multifield.trec-covid.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-covid-test \
  run.beir.bm25-multifield.trec-covid.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-covid-test \
  run.beir.bm25-multifield.trec-covid.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-trec-covid.splade-pp-ed \
  --topics beir-v1.0.0-trec-covid.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.trec-covid.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
  run.beir.splade-pp-ed.trec-covid.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-covid-test \
  run.beir.splade-pp-ed.trec-covid.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-covid-test \
  run.beir.splade-pp-ed.trec-covid.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-trec-covid.contriever-msmarco \
  --topics beir-v1.0.0-trec-covid-test \
  --output run.beir.contriever-msmarco.trec-covid.txt \
  --hits 1000 --remove-query
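Dense retrieval encodes the queries on the fly, so a GPU helps. Recent Pyserini versions expose a --device flag in pyserini.search.faiss (assumption: available in the installed version); e.g., appending --device cuda:0 runs the query encoder on GPU:

python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-trec-covid.contriever-msmarco \
  --topics beir-v1.0.0-trec-covid-test \
  --output run.beir.contriever-msmarco.trec-covid.txt \
  --hits 1000 --remove-query --device cuda:0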
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
  run.beir.contriever-msmarco.trec-covid.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-covid-test \
  run.beir.contriever-msmarco.trec-covid.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-covid-test \
  run.beir.contriever-msmarco.trec-covid.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-trec-covid.bge-base-en-v1.5 \
  --topics beir-v1.0.0-trec-covid-test \
  --output run.beir.bge-base-en-v1.5.trec-covid.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test \
  run.beir.bge-base-en-v1.5.trec-covid.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-covid-test \
  run.beir.bge-base-en-v1.5.trec-covid.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-covid-test \
  run.beir.bge-base-en-v1.5.trec-covid.txt
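With all five trec-covid runs in place, a per-model summary of nDCG@10 can be scraped from trec_eval's aggregate output (sketch; assumes the standard "metric / all / value" line format):

for run in run.beir.*.trec-covid.txt; do
  score=$(python -m pyserini.eval.trec_eval \
    -c -m ndcg_cut.10 beir-v1.0.0-trec-covid-test ${run} | awk '{print $3}')
  echo "${run}  ${score}"
done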
bioasq:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.5225 | 0.7687 |
| BM25 (multifield) | 0.4646 | 0.7145 |
| SPLADE++ (EnsembleDistil) | 0.4980 | 0.7385 |
| Contriever (MS MARCO) | 0.3829 | 0.6072 |
| BGE-base-en-v1.5 | 0.4148 | 0.6316 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-bioasq.flat \
  --topics beir-v1.0.0-bioasq-test \
  --output run.beir.bm25-flat.bioasq.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
  run.beir.bm25-flat.bioasq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-bioasq-test \
  run.beir.bm25-flat.bioasq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-bioasq-test \
  run.beir.bm25-flat.bioasq.txt
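Index and topic names follow a uniform beir-v1.0.0-<corpus> pattern, so the BM25 (flat) run and evaluation generalize to any corpus in this section with a loop (sketch; corpus list abbreviated):

for corpus in trec-covid bioasq nfcorpus nq hotpotqa fiqa; do
  python -m pyserini.search.lucene \
    --threads 16 --batch-size 128 \
    --index beir-v1.0.0-${corpus}.flat \
    --topics beir-v1.0.0-${corpus}-test \
    --output run.beir.bm25-flat.${corpus}.txt \
    --output-format trec \
    --hits 1000 --bm25 --remove-query
  python -m pyserini.eval.trec_eval \
    -c -m ndcg_cut.10 beir-v1.0.0-${corpus}-test \
    run.beir.bm25-flat.${corpus}.txt
done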
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-bioasq.multifield \
  --topics beir-v1.0.0-bioasq-test \
  --output run.beir.bm25-multifield.bioasq.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
  run.beir.bm25-multifield.bioasq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-bioasq-test \
  run.beir.bm25-multifield.bioasq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-bioasq-test \
  run.beir.bm25-multifield.bioasq.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-bioasq.splade-pp-ed \
  --topics beir-v1.0.0-bioasq.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.bioasq.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
  run.beir.splade-pp-ed.bioasq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-bioasq-test \
  run.beir.splade-pp-ed.bioasq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-bioasq-test \
  run.beir.splade-pp-ed.bioasq.txt
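trec_eval's -q flag emits per-topic scores in addition to the "all" aggregate, which helps identify queries where a model fails (sketch):

python -m pyserini.eval.trec_eval \
  -q -c -m recall.1000 beir-v1.0.0-bioasq-test \
  run.beir.splade-pp-ed.bioasq.txt | tail -5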
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-bioasq.contriever-msmarco \
  --topics beir-v1.0.0-bioasq-test \
  --output run.beir.contriever-msmarco.bioasq.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
  run.beir.contriever-msmarco.bioasq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-bioasq-test \
  run.beir.contriever-msmarco.bioasq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-bioasq-test \
  run.beir.contriever-msmarco.bioasq.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-bioasq.bge-base-en-v1.5 \
  --topics beir-v1.0.0-bioasq-test \
  --output run.beir.bge-base-en-v1.5.bioasq.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-bioasq-test \
  run.beir.bge-base-en-v1.5.bioasq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-bioasq-test \
  run.beir.bge-base-en-v1.5.bioasq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-bioasq-test \
  run.beir.bge-base-en-v1.5.bioasq.txt
nfcorpus:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.3218 | 0.2457 |
| BM25 (multifield) | 0.3254 | 0.2500 |
| SPLADE++ (EnsembleDistil) | 0.3470 | 0.2844 |
| Contriever (MS MARCO) | 0.3281 | 0.3008 |
| BGE-base-en-v1.5 | 0.3735 | 0.3368 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-nfcorpus.flat \
  --topics beir-v1.0.0-nfcorpus-test \
  --output run.beir.bm25-flat.nfcorpus.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
  run.beir.bm25-flat.nfcorpus.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nfcorpus-test \
  run.beir.bm25-flat.nfcorpus.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nfcorpus-test \
  run.beir.bm25-flat.nfcorpus.txt
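Two quick integrity checks on a run file (sketch): count distinct topics, and hits per topic; with --hits 1000, counts at or near 1000 are expected unless the corpus is smaller than the cutoff:

cut -d' ' -f1 run.beir.bm25-flat.nfcorpus.txt | sort -u | wc -l
awk '{c[$1]++} END {for (q in c) print q, c[q]}' run.beir.bm25-flat.nfcorpus.txt | head -3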
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-nfcorpus.multifield \
  --topics beir-v1.0.0-nfcorpus-test \
  --output run.beir.bm25-multifield.nfcorpus.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
  run.beir.bm25-multifield.nfcorpus.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nfcorpus-test \
  run.beir.bm25-multifield.nfcorpus.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nfcorpus-test \
  run.beir.bm25-multifield.nfcorpus.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-nfcorpus.splade-pp-ed \
  --topics beir-v1.0.0-nfcorpus.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.nfcorpus.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
  run.beir.splade-pp-ed.nfcorpus.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nfcorpus-test \
  run.beir.splade-pp-ed.nfcorpus.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nfcorpus-test \
  run.beir.splade-pp-ed.nfcorpus.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-nfcorpus.contriever-msmarco \
  --topics beir-v1.0.0-nfcorpus-test \
  --output run.beir.contriever-msmarco.nfcorpus.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
  run.beir.contriever-msmarco.nfcorpus.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nfcorpus-test \
  run.beir.contriever-msmarco.nfcorpus.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nfcorpus-test \
  run.beir.contriever-msmarco.nfcorpus.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-nfcorpus.bge-base-en-v1.5 \
  --topics beir-v1.0.0-nfcorpus-test \
  --output run.beir.bge-base-en-v1.5.nfcorpus.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nfcorpus-test \
  run.beir.bge-base-en-v1.5.nfcorpus.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nfcorpus-test \
  run.beir.bge-base-en-v1.5.nfcorpus.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nfcorpus-test \
  run.beir.bge-base-en-v1.5.nfcorpus.txt
nq:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.3055 | 0.7513 |
| BM25 (multifield) | 0.3285 | 0.7597 |
| SPLADE++ (EnsembleDistil) | 0.5378 | 0.9296 |
| Contriever (MS MARCO) | 0.4977 | 0.9252 |
| BGE-base-en-v1.5 | 0.5414 | 0.9415 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-nq.flat \
  --topics beir-v1.0.0-nq-test \
  --output run.beir.bm25-flat.nq.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nq-test \
  run.beir.bm25-flat.nq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nq-test \
  run.beir.bm25-flat.nq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nq-test \
  run.beir.bm25-flat.nq.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-nq.multifield \
  --topics beir-v1.0.0-nq-test \
  --output run.beir.bm25-multifield.nq.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nq-test \
  run.beir.bm25-multifield.nq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nq-test \
  run.beir.bm25-multifield.nq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nq-test \
  run.beir.bm25-multifield.nq.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-nq.splade-pp-ed \
  --topics beir-v1.0.0-nq.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.nq.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nq-test \
  run.beir.splade-pp-ed.nq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nq-test \
  run.beir.splade-pp-ed.nq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nq-test \
  run.beir.splade-pp-ed.nq.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-nq.contriever-msmarco \
  --topics beir-v1.0.0-nq-test \
  --output run.beir.contriever-msmarco.nq.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nq-test \
  run.beir.contriever-msmarco.nq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nq-test \
  run.beir.contriever-msmarco.nq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nq-test \
  run.beir.contriever-msmarco.nq.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-nq.bge-base-en-v1.5 \
  --topics beir-v1.0.0-nq-test \
  --output run.beir.bge-base-en-v1.5.nq.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-nq-test \
  run.beir.bge-base-en-v1.5.nq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-nq-test \
  run.beir.bge-base-en-v1.5.nq.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-nq-test \
  run.beir.bge-base-en-v1.5.nq.txt
hotpotqa:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.6330 | 0.7957 |
| BM25 (multifield) | 0.6027 | 0.7400 |
| SPLADE++ (EnsembleDistil) | 0.6868 | 0.8177 |
| Contriever (MS MARCO) | 0.6376 | 0.7772 |
| BGE-base-en-v1.5 | 0.7259 | 0.8726 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-hotpotqa.flat \
  --topics beir-v1.0.0-hotpotqa-test \
  --output run.beir.bm25-flat.hotpotqa.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
  run.beir.bm25-flat.hotpotqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-hotpotqa-test \
  run.beir.bm25-flat.hotpotqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-hotpotqa-test \
  run.beir.bm25-flat.hotpotqa.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-hotpotqa.multifield \
  --topics beir-v1.0.0-hotpotqa-test \
  --output run.beir.bm25-multifield.hotpotqa.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
  run.beir.bm25-multifield.hotpotqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-hotpotqa-test \
  run.beir.bm25-multifield.hotpotqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-hotpotqa-test \
  run.beir.bm25-multifield.hotpotqa.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-hotpotqa.splade-pp-ed \
  --topics beir-v1.0.0-hotpotqa.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.hotpotqa.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
  run.beir.splade-pp-ed.hotpotqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-hotpotqa-test \
  run.beir.splade-pp-ed.hotpotqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-hotpotqa-test \
  run.beir.splade-pp-ed.hotpotqa.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-hotpotqa.contriever-msmarco \
  --topics beir-v1.0.0-hotpotqa-test \
  --output run.beir.contriever-msmarco.hotpotqa.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
  run.beir.contriever-msmarco.hotpotqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-hotpotqa-test \
  run.beir.contriever-msmarco.hotpotqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-hotpotqa-test \
  run.beir.contriever-msmarco.hotpotqa.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-hotpotqa.bge-base-en-v1.5 \
  --topics beir-v1.0.0-hotpotqa-test \
  --output run.beir.bge-base-en-v1.5.hotpotqa.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-hotpotqa-test \
  run.beir.bge-base-en-v1.5.hotpotqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-hotpotqa-test \
  run.beir.bge-base-en-v1.5.hotpotqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-hotpotqa-test \
  run.beir.bge-base-en-v1.5.hotpotqa.txt
fiqa:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.2361 | 0.5395 |
| BM25 (multifield) | 0.2361 | 0.5395 |
| SPLADE++ (EnsembleDistil) | 0.3475 | 0.6314 |
| Contriever (MS MARCO) | 0.3293 | 0.6558 |
| BGE-base-en-v1.5 | 0.4065 | 0.7415 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-fiqa.flat \
  --topics beir-v1.0.0-fiqa-test \
  --output run.beir.bm25-flat.fiqa.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
  run.beir.bm25-flat.fiqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fiqa-test \
  run.beir.bm25-flat.fiqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fiqa-test \
  run.beir.bm25-flat.fiqa.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-fiqa.multifield \
  --topics beir-v1.0.0-fiqa-test \
  --output run.beir.bm25-multifield.fiqa.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
  run.beir.bm25-multifield.fiqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fiqa-test \
  run.beir.bm25-multifield.fiqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fiqa-test \
  run.beir.bm25-multifield.fiqa.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-fiqa.splade-pp-ed \
  --topics beir-v1.0.0-fiqa.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.fiqa.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
  run.beir.splade-pp-ed.fiqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fiqa-test \
  run.beir.splade-pp-ed.fiqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fiqa-test \
  run.beir.splade-pp-ed.fiqa.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-fiqa.contriever-msmarco \
  --topics beir-v1.0.0-fiqa-test \
  --output run.beir.contriever-msmarco.fiqa.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
  run.beir.contriever-msmarco.fiqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fiqa-test \
  run.beir.contriever-msmarco.fiqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fiqa-test \
  run.beir.contriever-msmarco.fiqa.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-fiqa.bge-base-en-v1.5 \
  --topics beir-v1.0.0-fiqa-test \
  --output run.beir.bge-base-en-v1.5.fiqa.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fiqa-test \
  run.beir.bge-base-en-v1.5.fiqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fiqa-test \
  run.beir.bge-base-en-v1.5.fiqa.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fiqa-test \
  run.beir.bge-base-en-v1.5.fiqa.txt
signal1m:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.3304 | 0.3703 |
| BM25 (multifield) | 0.3304 | 0.3703 |
| SPLADE++ (EnsembleDistil) | 0.3008 | 0.3398 |
| Contriever (MS MARCO) | 0.2783 | 0.3220 |
| BGE-base-en-v1.5 | 0.2886 | 0.3112 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-signal1m.flat \
  --topics beir-v1.0.0-signal1m-test \
  --output run.beir.bm25-flat.signal1m.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
  run.beir.bm25-flat.signal1m.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-signal1m-test \
  run.beir.bm25-flat.signal1m.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-signal1m-test \
  run.beir.bm25-flat.signal1m.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-signal1m.multifield \
  --topics beir-v1.0.0-signal1m-test \
  --output run.beir.bm25-multifield.signal1m.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
  run.beir.bm25-multifield.signal1m.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-signal1m-test \
  run.beir.bm25-multifield.signal1m.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-signal1m-test \
  run.beir.bm25-multifield.signal1m.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-signal1m.splade-pp-ed \
  --topics beir-v1.0.0-signal1m.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.signal1m.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
  run.beir.splade-pp-ed.signal1m.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-signal1m-test \
  run.beir.splade-pp-ed.signal1m.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-signal1m-test \
  run.beir.splade-pp-ed.signal1m.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-signal1m.contriever-msmarco \
  --topics beir-v1.0.0-signal1m-test \
  --output run.beir.contriever-msmarco.signal1m.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
  run.beir.contriever-msmarco.signal1m.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-signal1m-test \
  run.beir.contriever-msmarco.signal1m.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-signal1m-test \
  run.beir.contriever-msmarco.signal1m.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-signal1m.bge-base-en-v1.5 \
  --topics beir-v1.0.0-signal1m-test \
  --output run.beir.bge-base-en-v1.5.signal1m.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-signal1m-test \
  run.beir.bge-base-en-v1.5.signal1m.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-signal1m-test \
  run.beir.bge-base-en-v1.5.signal1m.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-signal1m-test \
  run.beir.bge-base-en-v1.5.signal1m.txt
trec-news:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.3952 | 0.4469 |
| BM25 (multifield) | 0.3977 | 0.4216 |
| SPLADE++ (EnsembleDistil) | 0.4152 | 0.4414 |
| Contriever (MS MARCO) | 0.4283 | 0.4924 |
| BGE-base-en-v1.5 | 0.4424 | 0.4992 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-trec-news.flat \
  --topics beir-v1.0.0-trec-news-test \
  --output run.beir.bm25-flat.trec-news.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
  run.beir.bm25-flat.trec-news.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-news-test \
  run.beir.bm25-flat.trec-news.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-news-test \
  run.beir.bm25-flat.trec-news.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-trec-news.multifield \
  --topics beir-v1.0.0-trec-news-test \
  --output run.beir.bm25-multifield.trec-news.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
  run.beir.bm25-multifield.trec-news.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-news-test \
  run.beir.bm25-multifield.trec-news.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-news-test \
  run.beir.bm25-multifield.trec-news.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-trec-news.splade-pp-ed \
  --topics beir-v1.0.0-trec-news.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.trec-news.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
  run.beir.splade-pp-ed.trec-news.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-news-test \
  run.beir.splade-pp-ed.trec-news.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-news-test \
  run.beir.splade-pp-ed.trec-news.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-trec-news.contriever-msmarco \
  --topics beir-v1.0.0-trec-news-test \
  --output run.beir.contriever-msmarco.trec-news.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
  run.beir.contriever-msmarco.trec-news.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-news-test \
  run.beir.contriever-msmarco.trec-news.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-news-test \
  run.beir.contriever-msmarco.trec-news.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-trec-news.bge-base-en-v1.5 \
  --topics beir-v1.0.0-trec-news-test \
  --output run.beir.bge-base-en-v1.5.trec-news.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-trec-news-test \
  run.beir.bge-base-en-v1.5.trec-news.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-trec-news-test \
  run.beir.bge-base-en-v1.5.trec-news.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-trec-news-test \
  run.beir.bge-base-en-v1.5.trec-news.txt
robust04:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.4070 | 0.3746 |
| BM25 (multifield) | 0.4070 | 0.3746 |
| SPLADE++ (EnsembleDistil) | 0.4679 | 0.3850 |
| Contriever (MS MARCO) | 0.4729 | 0.3917 |
| BGE-base-en-v1.5 | 0.4435 | 0.3510 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-robust04.flat \
  --topics beir-v1.0.0-robust04-test \
  --output run.beir.bm25-flat.robust04.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
  run.beir.bm25-flat.robust04.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-robust04-test \
  run.beir.bm25-flat.robust04.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-robust04-test \
  run.beir.bm25-flat.robust04.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-robust04.multifield \
  --topics beir-v1.0.0-robust04-test \
  --output run.beir.bm25-multifield.robust04.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
  run.beir.bm25-multifield.robust04.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-robust04-test \
  run.beir.bm25-multifield.robust04.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-robust04-test \
  run.beir.bm25-multifield.robust04.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-robust04.splade-pp-ed \
  --topics beir-v1.0.0-robust04.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.robust04.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
  run.beir.splade-pp-ed.robust04.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-robust04-test \
  run.beir.splade-pp-ed.robust04.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-robust04-test \
  run.beir.splade-pp-ed.robust04.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-robust04.contriever-msmarco \
  --topics beir-v1.0.0-robust04-test \
  --output run.beir.contriever-msmarco.robust04.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
  run.beir.contriever-msmarco.robust04.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-robust04-test \
  run.beir.contriever-msmarco.robust04.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-robust04-test \
  run.beir.contriever-msmarco.robust04.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-robust04.bge-base-en-v1.5 \
  --topics beir-v1.0.0-robust04-test \
  --output run.beir.bge-base-en-v1.5.robust04.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-robust04-test \
  run.beir.bge-base-en-v1.5.robust04.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-robust04-test \
  run.beir.bge-base-en-v1.5.robust04.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-robust04-test \
  run.beir.bge-base-en-v1.5.robust04.txt
arguana:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.3970 | 0.9324 |
| BM25 (multifield) | 0.4142 | 0.9431 |
| SPLADE++ (EnsembleDistil) | 0.5203 | 0.9744 |
| Contriever (MS MARCO) | 0.4461 | 0.9765 |
| BGE-base-en-v1.5 | 0.6365 | 0.9915 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-arguana.flat \
  --topics beir-v1.0.0-arguana-test \
  --output run.beir.bm25-flat.arguana.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
  run.beir.bm25-flat.arguana.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-arguana-test \
  run.beir.bm25-flat.arguana.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-arguana-test \
  run.beir.bm25-flat.arguana.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-arguana.multifield \
  --topics beir-v1.0.0-arguana-test \
  --output run.beir.bm25-multifield.arguana.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
  run.beir.bm25-multifield.arguana.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-arguana-test \
  run.beir.bm25-multifield.arguana.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-arguana-test \
  run.beir.bm25-multifield.arguana.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-arguana.splade-pp-ed \
  --topics beir-v1.0.0-arguana.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.arguana.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
  run.beir.splade-pp-ed.arguana.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-arguana-test \
  run.beir.splade-pp-ed.arguana.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-arguana-test \
  run.beir.splade-pp-ed.arguana.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-arguana.contriever-msmarco \
  --topics beir-v1.0.0-arguana-test \
  --output run.beir.contriever-msmarco.arguana.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
  run.beir.contriever-msmarco.arguana.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-arguana-test \
  run.beir.contriever-msmarco.arguana.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-arguana-test \
  run.beir.contriever-msmarco.arguana.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "" \
  --index beir-v1.0.0-arguana.bge-base-en-v1.5 \
  --topics beir-v1.0.0-arguana-test \
  --output run.beir.bge-base-en-v1.5.arguana.txt \
  --hits 1000 --remove-query
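Note the empty --query-prefix: arguana is the one corpus in this section where the BGE run omits the standard prefix, since ArguAna queries are themselves full argument passages rather than short questions. To measure the prefix's effect, one could re-run with it under a separate output name (a hypothetical variant, not the documented configuration):

python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-arguana.bge-base-en-v1.5 \
  --topics beir-v1.0.0-arguana-test \
  --output run.beir.bge-base-en-v1.5-prefix.arguana.txt \
  --hits 1000 --remove-query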
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-arguana-test \
  run.beir.bge-base-en-v1.5.arguana.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-arguana-test \
  run.beir.bge-base-en-v1.5.arguana.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-arguana-test \
  run.beir.bge-base-en-v1.5.arguana.txt
webis-touche2020:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.4422 | 0.5822 |
| BM25 (multifield) | 0.3673 | 0.5376 |
| SPLADE++ (EnsembleDistil) | 0.2468 | 0.4715 |
| Contriever (MS MARCO) | 0.2040 | 0.4420 |
| BGE-base-en-v1.5 | 0.2571 | 0.4867 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-webis-touche2020.flat \
  --topics beir-v1.0.0-webis-touche2020-test \
  --output run.beir.bm25-flat.webis-touche2020.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
  run.beir.bm25-flat.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-webis-touche2020-test \
  run.beir.bm25-flat.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
  run.beir.bm25-flat.webis-touche2020.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-webis-touche2020.multifield \
  --topics beir-v1.0.0-webis-touche2020-test \
  --output run.beir.bm25-multifield.webis-touche2020.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
  run.beir.bm25-multifield.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-webis-touche2020-test \
  run.beir.bm25-multifield.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
  run.beir.bm25-multifield.webis-touche2020.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-webis-touche2020.splade-pp-ed \
  --topics beir-v1.0.0-webis-touche2020.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.webis-touche2020.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
  run.beir.splade-pp-ed.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-webis-touche2020-test \
  run.beir.splade-pp-ed.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
  run.beir.splade-pp-ed.webis-touche2020.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-webis-touche2020.contriever-msmarco \
  --topics beir-v1.0.0-webis-touche2020-test \
  --output run.beir.contriever-msmarco.webis-touche2020.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
  run.beir.contriever-msmarco.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-webis-touche2020-test \
  run.beir.contriever-msmarco.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
  run.beir.contriever-msmarco.webis-touche2020.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-webis-touche2020.bge-base-en-v1.5 \
  --topics beir-v1.0.0-webis-touche2020-test \
  --output run.beir.bge-base-en-v1.5.webis-touche2020.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-webis-touche2020-test \
  run.beir.bge-base-en-v1.5.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-webis-touche2020-test \
  run.beir.bge-base-en-v1.5.webis-touche2020.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-webis-touche2020-test \
  run.beir.bge-base-en-v1.5.webis-touche2020.txt
cqadupstack-android:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.3801 | 0.6829 |
| BM25 (multifield) | 0.3709 | 0.6889 |
| SPLADE++ (EnsembleDistil) | 0.3904 | 0.7404 |
| Contriever (MS MARCO) | 0.4255 | 0.7503 |
| BGE-base-en-v1.5 | 0.5076 | 0.8454 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-android.flat \
  --topics beir-v1.0.0-cqadupstack-android-test \
  --output run.beir.bm25-flat.cqadupstack-android.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
  run.beir.bm25-flat.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
  run.beir.bm25-flat.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
  run.beir.bm25-flat.cqadupstack-android.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-android.multifield \
  --topics beir-v1.0.0-cqadupstack-android-test \
  --output run.beir.bm25-multifield.cqadupstack-android.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
  run.beir.bm25-multifield.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
  run.beir.bm25-multifield.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
  run.beir.bm25-multifield.cqadupstack-android.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-android.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-android.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-android.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
  run.beir.splade-pp-ed.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
  run.beir.splade-pp-ed.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
  run.beir.splade-pp-ed.cqadupstack-android.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-android.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-android-test \
  --output run.beir.contriever-msmarco.cqadupstack-android.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
  run.beir.contriever-msmarco.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
  run.beir.contriever-msmarco.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
  run.beir.contriever-msmarco.cqadupstack-android.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-android.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-android-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-android.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-android-test \
  run.beir.bge-base-en-v1.5.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-android-test \
  run.beir.bge-base-en-v1.5.cqadupstack-android.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-android-test \
  run.beir.bge-base-en-v1.5.cqadupstack-android.txt
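The cqadupstack corpora are per-subforum splits that all follow the same pattern, so the remaining subforums can be covered in one loop (sketch, shown for BM25 flat; subforum list abbreviated to those appearing below):

for sub in android english gaming gis mathematica; do
  python -m pyserini.search.lucene \
    --threads 16 --batch-size 128 \
    --index beir-v1.0.0-cqadupstack-${sub}.flat \
    --topics beir-v1.0.0-cqadupstack-${sub}-test \
    --output run.beir.bm25-flat.cqadupstack-${sub}.txt \
    --output-format trec \
    --hits 1000 --bm25 --remove-query
done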
cqadupstack-english:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.3453 | 0.5757 |
| BM25 (multifield) | 0.3321 | 0.5842 |
| SPLADE++ (EnsembleDistil) | 0.4079 | 0.6946 |
| Contriever (MS MARCO) | 0.4326 | 0.6935 |
| BGE-base-en-v1.5 | 0.4857 | 0.7586 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-english.flat \
  --topics beir-v1.0.0-cqadupstack-english-test \
  --output run.beir.bm25-flat.cqadupstack-english.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
  run.beir.bm25-flat.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
  run.beir.bm25-flat.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
  run.beir.bm25-flat.cqadupstack-english.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-english.multifield \
  --topics beir-v1.0.0-cqadupstack-english-test \
  --output run.beir.bm25-multifield.cqadupstack-english.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
  run.beir.bm25-multifield.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
  run.beir.bm25-multifield.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
  run.beir.bm25-multifield.cqadupstack-english.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-english.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-english.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-english.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
  run.beir.splade-pp-ed.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
  run.beir.splade-pp-ed.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
  run.beir.splade-pp-ed.cqadupstack-english.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-english.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-english-test \
  --output run.beir.contriever-msmarco.cqadupstack-english.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
  run.beir.contriever-msmarco.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
  run.beir.contriever-msmarco.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
  run.beir.contriever-msmarco.cqadupstack-english.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-english.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-english-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-english.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-english-test \
  run.beir.bge-base-en-v1.5.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-english-test \
  run.beir.bge-base-en-v1.5.cqadupstack-english.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-english-test \
  run.beir.bge-base-en-v1.5.cqadupstack-english.txt
cqadupstack-gaming:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.4822 | 0.7651 |
| BM25 (multifield) | 0.4418 | 0.7571 |
| SPLADE++ (EnsembleDistil) | 0.4957 | 0.8131 |
| Contriever (MS MARCO) | 0.5276 | 0.8481 |
| BGE-base-en-v1.5 | 0.5967 | 0.9036 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-gaming.flat \
  --topics beir-v1.0.0-cqadupstack-gaming-test \
  --output run.beir.bm25-flat.cqadupstack-gaming.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.bm25-flat.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.bm25-flat.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.bm25-flat.cqadupstack-gaming.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-gaming.multifield \
  --topics beir-v1.0.0-cqadupstack-gaming-test \
  --output run.beir.bm25-multifield.cqadupstack-gaming.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.bm25-multifield.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.bm25-multifield.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.bm25-multifield.cqadupstack-gaming.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-gaming.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-gaming.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-gaming.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.splade-pp-ed.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.splade-pp-ed.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.splade-pp-ed.cqadupstack-gaming.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-gaming.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-gaming-test \
  --output run.beir.contriever-msmarco.cqadupstack-gaming.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.contriever-msmarco.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.contriever-msmarco.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.contriever-msmarco.cqadupstack-gaming.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-gaming.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-gaming-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-gaming.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.bge-base-en-v1.5.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.bge-base-en-v1.5.cqadupstack-gaming.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gaming-test \
  run.beir.bge-base-en-v1.5.cqadupstack-gaming.txt
cqadupstack-gis:

| Model | nDCG@10 | R@100 |
|:------|--------:|------:|
| BM25 (flat) | 0.2901 | 0.6119 |
| BM25 (multifield) | 0.2904 | 0.6458 |
| SPLADE++ (EnsembleDistil) | 0.3150 | 0.6320 |
| Contriever (MS MARCO) | 0.3022 | 0.6272 |
| BGE-base-en-v1.5 | 0.4127 | 0.7682 |
  
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-gis.flat \
  --topics beir-v1.0.0-cqadupstack-gis-test \
  --output run.beir.bm25-flat.cqadupstack-gis.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.bm25-flat.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.bm25-flat.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.bm25-flat.cqadupstack-gis.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-gis.multifield \
  --topics beir-v1.0.0-cqadupstack-gis-test \
  --output run.beir.bm25-multifield.cqadupstack-gis.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.bm25-multifield.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.bm25-multifield.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.bm25-multifield.cqadupstack-gis.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-gis.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-gis.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-gis.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.splade-pp-ed.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.splade-pp-ed.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.splade-pp-ed.cqadupstack-gis.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-gis.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-gis-test \
  --output run.beir.contriever-msmarco.cqadupstack-gis.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.contriever-msmarco.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.contriever-msmarco.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.contriever-msmarco.cqadupstack-gis.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-gis.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-gis-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-gis.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.bge-base-en-v1.5.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.bge-base-en-v1.5.cqadupstack-gis.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-gis-test \
  run.beir.bge-base-en-v1.5.cqadupstack-gis.txt
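
The three Lucene-based runs for a corpus differ only in the index suffix, the topics, and the scoring flags. As a sketch for one corpus (the run_lucene helper is our own naming, not a Pyserini interface):

corpus=cqadupstack-gis
run_lucene() {
  # $1 = run tag, $2 = index suffix, $3 = topics; remaining args = scoring flags
  local tag=$1 suffix=$2 topics=$3; shift 3
  python -m pyserini.search.lucene \
    --threads 16 --batch-size 128 \
    --index beir-v1.0.0-${corpus}.${suffix} \
    --topics ${topics} \
    --output run.beir.${tag}.${corpus}.txt \
    --output-format trec \
    --hits 1000 --remove-query "$@"
}

run_lucene bm25-flat flat beir-v1.0.0-${corpus}-test --bm25
run_lucene bm25-multifield multifield beir-v1.0.0-${corpus}-test \
  --bm25 --fields contents=1.0 title=1.0
run_lucene splade-pp-ed splade-pp-ed beir-v1.0.0-${corpus}.test.splade-pp-ed --impact
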
| cqadupstack-mathematica | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.2015 | 0.4877 |
| BM25 (multifield) | 0.2046 | 0.5215 |
| SPLADE++ ED | 0.2377 | 0.5797 |
| Contriever (MS MARCO) | 0.2355 | 0.5726 |
| BGE-base-en-v1.5 | 0.3163 | 0.6922 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-mathematica.flat \
  --topics beir-v1.0.0-cqadupstack-mathematica-test \
  --output run.beir.bm25-flat.cqadupstack-mathematica.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.bm25-flat.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.bm25-flat.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.bm25-flat.cqadupstack-mathematica.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-mathematica.multifield \
  --topics beir-v1.0.0-cqadupstack-mathematica-test \
  --output run.beir.bm25-multifield.cqadupstack-mathematica.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.bm25-multifield.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.bm25-multifield.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.bm25-multifield.cqadupstack-mathematica.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-mathematica.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-mathematica.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-mathematica.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.splade-pp-ed.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.splade-pp-ed.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.splade-pp-ed.cqadupstack-mathematica.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-mathematica.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-mathematica-test \
  --output run.beir.contriever-msmarco.cqadupstack-mathematica.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.contriever-msmarco.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.contriever-msmarco.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.contriever-msmarco.cqadupstack-mathematica.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-mathematica.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-mathematica-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-mathematica.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.bge-base-en-v1.5.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.bge-base-en-v1.5.cqadupstack-mathematica.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-mathematica-test \
  run.beir.bge-base-en-v1.5.cqadupstack-mathematica.txt
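
The two dense runs share the pyserini.search.faiss machinery and differ only in query encoding: Contriever uses --encoder-class contriever with facebook/contriever-msmarco, while BGE uses --encoder-class auto, L2-normalizes query embeddings (--l2-norm), and prepends an instruction (--query-prefix) to every query.
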
| cqadupstack-physics | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.3214 | 0.6326 |
| BM25 (multifield) | 0.3248 | 0.6486 |
| SPLADE++ ED | 0.3599 | 0.7196 |
| Contriever (MS MARCO) | 0.4159 | 0.7619 |
| BGE-base-en-v1.5 | 0.4724 | 0.8078 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-physics.flat \
  --topics beir-v1.0.0-cqadupstack-physics-test \
  --output run.beir.bm25-flat.cqadupstack-physics.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.bm25-flat.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.bm25-flat.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.bm25-flat.cqadupstack-physics.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-physics.multifield \
  --topics beir-v1.0.0-cqadupstack-physics-test \
  --output run.beir.bm25-multifield.cqadupstack-physics.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.bm25-multifield.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.bm25-multifield.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.bm25-multifield.cqadupstack-physics.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-physics.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-physics.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-physics.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.splade-pp-ed.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.splade-pp-ed.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.splade-pp-ed.cqadupstack-physics.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-physics.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-physics-test \
  --output run.beir.contriever-msmarco.cqadupstack-physics.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.contriever-msmarco.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.contriever-msmarco.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.contriever-msmarco.cqadupstack-physics.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-physics.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-physics-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-physics.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.bge-base-en-v1.5.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.bge-base-en-v1.5.cqadupstack-physics.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-physics-test \
  run.beir.bge-base-en-v1.5.cqadupstack-physics.txt
| cqadupstack-programmers | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.2802 | 0.5588 |
| BM25 (multifield) | 0.2963 | 0.6194 |
| SPLADE++ ED | 0.3401 | 0.6585 |
| Contriever (MS MARCO) | 0.3574 | 0.7191 |
| BGE-base-en-v1.5 | 0.4238 | 0.7856 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-programmers.flat \
  --topics beir-v1.0.0-cqadupstack-programmers-test \
  --output run.beir.bm25-flat.cqadupstack-programmers.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.bm25-flat.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.bm25-flat.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.bm25-flat.cqadupstack-programmers.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-programmers.multifield \
  --topics beir-v1.0.0-cqadupstack-programmers-test \
  --output run.beir.bm25-multifield.cqadupstack-programmers.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.bm25-multifield.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.bm25-multifield.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.bm25-multifield.cqadupstack-programmers.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-programmers.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-programmers.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-programmers.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.splade-pp-ed.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.splade-pp-ed.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.splade-pp-ed.cqadupstack-programmers.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-programmers.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-programmers-test \
  --output run.beir.contriever-msmarco.cqadupstack-programmers.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.contriever-msmarco.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.contriever-msmarco.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.contriever-msmarco.cqadupstack-programmers.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-programmers.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-programmers-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-programmers.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.bge-base-en-v1.5.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.bge-base-en-v1.5.cqadupstack-programmers.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-programmers-test \
  run.beir.bge-base-en-v1.5.cqadupstack-programmers.txt
| cqadupstack-stats | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.2711 | 0.5338 |
| BM25 (multifield) | 0.2790 | 0.5719 |
| SPLADE++ ED | 0.2990 | 0.5894 |
| Contriever (MS MARCO) | 0.3095 | 0.5860 |
| BGE-base-en-v1.5 | 0.3732 | 0.6727 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-stats.flat \
  --topics beir-v1.0.0-cqadupstack-stats-test \
  --output run.beir.bm25-flat.cqadupstack-stats.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.bm25-flat.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.bm25-flat.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.bm25-flat.cqadupstack-stats.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-stats.multifield \
  --topics beir-v1.0.0-cqadupstack-stats-test \
  --output run.beir.bm25-multifield.cqadupstack-stats.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.bm25-multifield.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.bm25-multifield.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.bm25-multifield.cqadupstack-stats.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-stats.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-stats.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-stats.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.splade-pp-ed.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.splade-pp-ed.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.splade-pp-ed.cqadupstack-stats.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-stats.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-stats-test \
  --output run.beir.contriever-msmarco.cqadupstack-stats.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.contriever-msmarco.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.contriever-msmarco.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.contriever-msmarco.cqadupstack-stats.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-stats.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-stats-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-stats.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.bge-base-en-v1.5.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.bge-base-en-v1.5.cqadupstack-stats.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-stats-test \
  run.beir.bge-base-en-v1.5.cqadupstack-stats.txt
| cqadupstack-tex | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.2244 | 0.4686 |
| BM25 (multifield) | 0.2086 | 0.4954 |
| SPLADE++ ED | 0.2530 | 0.5161 |
| Contriever (MS MARCO) | 0.2209 | 0.4985 |
| BGE-base-en-v1.5 | 0.3115 | 0.6489 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-tex.flat \
  --topics beir-v1.0.0-cqadupstack-tex-test \
  --output run.beir.bm25-flat.cqadupstack-tex.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.bm25-flat.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.bm25-flat.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.bm25-flat.cqadupstack-tex.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-tex.multifield \
  --topics beir-v1.0.0-cqadupstack-tex-test \
  --output run.beir.bm25-multifield.cqadupstack-tex.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.bm25-multifield.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.bm25-multifield.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.bm25-multifield.cqadupstack-tex.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-tex.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-tex.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-tex.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.splade-pp-ed.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.splade-pp-ed.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.splade-pp-ed.cqadupstack-tex.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-tex.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-tex-test \
  --output run.beir.contriever-msmarco.cqadupstack-tex.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.contriever-msmarco.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.contriever-msmarco.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.contriever-msmarco.cqadupstack-tex.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-tex.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-tex-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-tex.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.bge-base-en-v1.5.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.bge-base-en-v1.5.cqadupstack-tex.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-tex-test \
  run.beir.bge-base-en-v1.5.cqadupstack-tex.txt
| cqadupstack-unix | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.2749 | 0.5417 |
| BM25 (multifield) | 0.2788 | 0.5721 |
| SPLADE++ ED | 0.3167 | 0.6214 |
| Contriever (MS MARCO) | 0.3257 | 0.6161 |
| BGE-base-en-v1.5 | 0.4220 | 0.7797 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-unix.flat \
  --topics beir-v1.0.0-cqadupstack-unix-test \
  --output run.beir.bm25-flat.cqadupstack-unix.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.bm25-flat.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.bm25-flat.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.bm25-flat.cqadupstack-unix.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-unix.multifield \
  --topics beir-v1.0.0-cqadupstack-unix-test \
  --output run.beir.bm25-multifield.cqadupstack-unix.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.bm25-multifield.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.bm25-multifield.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.bm25-multifield.cqadupstack-unix.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-unix.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-unix.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-unix.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.splade-pp-ed.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.splade-pp-ed.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.splade-pp-ed.cqadupstack-unix.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-unix.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-unix-test \
  --output run.beir.contriever-msmarco.cqadupstack-unix.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.contriever-msmarco.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.contriever-msmarco.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.contriever-msmarco.cqadupstack-unix.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-unix.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-unix-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-unix.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.bge-base-en-v1.5.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.bge-base-en-v1.5.cqadupstack-unix.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-unix-test \
  run.beir.bge-base-en-v1.5.cqadupstack-unix.txt
| cqadupstack-webmasters | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.3059 | 0.5820 |
| BM25 (multifield) | 0.3008 | 0.6100 |
| SPLADE++ ED | 0.3167 | 0.6360 |
| Contriever (MS MARCO) | 0.3392 | 0.7032 |
| BGE-base-en-v1.5 | 0.4072 | 0.7774 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-webmasters.flat \
  --topics beir-v1.0.0-cqadupstack-webmasters-test \
  --output run.beir.bm25-flat.cqadupstack-webmasters.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.bm25-flat.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.bm25-flat.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.bm25-flat.cqadupstack-webmasters.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-webmasters.multifield \
  --topics beir-v1.0.0-cqadupstack-webmasters-test \
  --output run.beir.bm25-multifield.cqadupstack-webmasters.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.bm25-multifield.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.bm25-multifield.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.bm25-multifield.cqadupstack-webmasters.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-webmasters.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-webmasters.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-webmasters.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.splade-pp-ed.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.splade-pp-ed.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.splade-pp-ed.cqadupstack-webmasters.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-webmasters.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-webmasters-test \
  --output run.beir.contriever-msmarco.cqadupstack-webmasters.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.contriever-msmarco.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.contriever-msmarco.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.contriever-msmarco.cqadupstack-webmasters.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-webmasters.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-webmasters-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-webmasters.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.bge-base-en-v1.5.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.bge-base-en-v1.5.cqadupstack-webmasters.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-webmasters-test \
  run.beir.bge-base-en-v1.5.cqadupstack-webmasters.txt
| cqadupstack-wordpress | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.2483 | 0.5152 |
| BM25 (multifield) | 0.2562 | 0.5526 |
| SPLADE++ ED | 0.2733 | 0.5945 |
| Contriever (MS MARCO) | 0.2532 | 0.5769 |
| BGE-base-en-v1.5 | 0.3547 | 0.7047 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-wordpress.flat \
  --topics beir-v1.0.0-cqadupstack-wordpress-test \
  --output run.beir.bm25-flat.cqadupstack-wordpress.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.bm25-flat.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.bm25-flat.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.bm25-flat.cqadupstack-wordpress.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-wordpress.multifield \
  --topics beir-v1.0.0-cqadupstack-wordpress-test \
  --output run.beir.bm25-multifield.cqadupstack-wordpress.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.bm25-multifield.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.bm25-multifield.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.bm25-multifield.cqadupstack-wordpress.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-cqadupstack-wordpress.splade-pp-ed \
  --topics beir-v1.0.0-cqadupstack-wordpress.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.cqadupstack-wordpress.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.splade-pp-ed.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.splade-pp-ed.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.splade-pp-ed.cqadupstack-wordpress.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-cqadupstack-wordpress.contriever-msmarco \
  --topics beir-v1.0.0-cqadupstack-wordpress-test \
  --output run.beir.contriever-msmarco.cqadupstack-wordpress.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.contriever-msmarco.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.contriever-msmarco.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.contriever-msmarco.cqadupstack-wordpress.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-cqadupstack-wordpress.bge-base-en-v1.5 \
  --topics beir-v1.0.0-cqadupstack-wordpress-test \
  --output run.beir.bge-base-en-v1.5.cqadupstack-wordpress.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.bge-base-en-v1.5.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.bge-base-en-v1.5.cqadupstack-wordpress.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-cqadupstack-wordpress-test \
  run.beir.bge-base-en-v1.5.cqadupstack-wordpress.txt
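
CQADupStack comprises twelve subforum corpora (android, english, gaming, gis, mathematica, physics, programmers, stats, tex, unix, webmasters, wordpress), each indexed and scored separately as above. A loop like the following sketch replays, for example, the BM25 (flat) runs across all of them:

for c in android english gaming gis mathematica physics programmers \
         stats tex unix webmasters wordpress; do
  python -m pyserini.search.lucene \
    --threads 16 --batch-size 128 \
    --index beir-v1.0.0-cqadupstack-${c}.flat \
    --topics beir-v1.0.0-cqadupstack-${c}-test \
    --output run.beir.bm25-flat.cqadupstack-${c}.txt \
    --output-format trec \
    --hits 1000 --bm25 --remove-query
done
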
| quora | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.7886 | 0.9733 |
| BM25 (multifield) | 0.7886 | 0.9733 |
| SPLADE++ ED | 0.8343 | 0.9863 |
| Contriever (MS MARCO) | 0.8648 | 0.9935 |
| BGE-base-en-v1.5 | 0.8890 | 0.9968 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-quora.flat \
  --topics beir-v1.0.0-quora-test \
  --output run.beir.bm25-flat.quora.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-quora-test \
  run.beir.bm25-flat.quora.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-quora-test \
  run.beir.bm25-flat.quora.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-quora-test \
  run.beir.bm25-flat.quora.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-quora.multifield \
  --topics beir-v1.0.0-quora-test \
  --output run.beir.bm25-multifield.quora.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-quora-test \
  run.beir.bm25-multifield.quora.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-quora-test \
  run.beir.bm25-multifield.quora.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-quora-test \
  run.beir.bm25-multifield.quora.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-quora.splade-pp-ed \
  --topics beir-v1.0.0-quora.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.quora.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-quora-test \
  run.beir.splade-pp-ed.quora.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-quora-test \
  run.beir.splade-pp-ed.quora.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-quora-test \
  run.beir.splade-pp-ed.quora.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-quora.contriever-msmarco \
  --topics beir-v1.0.0-quora-test \
  --output run.beir.contriever-msmarco.quora.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-quora-test \
  run.beir.contriever-msmarco.quora.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-quora-test \
  run.beir.contriever-msmarco.quora.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-quora-test \
  run.beir.contriever-msmarco.quora.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "" \
  --index beir-v1.0.0-quora.bge-base-en-v1.5 \
  --topics beir-v1.0.0-quora-test \
  --output run.beir.bge-base-en-v1.5.quora.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-quora-test \
  run.beir.bge-base-en-v1.5.quora.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-quora-test \
  run.beir.bge-base-en-v1.5.quora.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-quora-test \
  run.beir.bge-base-en-v1.5.quora.txt
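
Unlike the other corpora, the BGE-base-en-v1.5 run for quora passes an empty --query-prefix: duplicate-question matching is a symmetric retrieval task, for which the BGE instruction prefix used elsewhere is typically omitted.
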
| dbpedia-entity | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.3180 | 0.4682 |
| BM25 (multifield) | 0.3128 | 0.3981 |
| SPLADE++ ED | 0.4366 | 0.5624 |
| Contriever (MS MARCO) | 0.4128 | 0.5414 |
| BGE-base-en-v1.5 | 0.4073 | 0.5298 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-dbpedia-entity.flat \
  --topics beir-v1.0.0-dbpedia-entity-test \
  --output run.beir.bm25-flat.dbpedia-entity.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
  run.beir.bm25-flat.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
  run.beir.bm25-flat.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
  run.beir.bm25-flat.dbpedia-entity.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-dbpedia-entity.multifield \
  --topics beir-v1.0.0-dbpedia-entity-test \
  --output run.beir.bm25-multifield.dbpedia-entity.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
  run.beir.bm25-multifield.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
  run.beir.bm25-multifield.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
  run.beir.bm25-multifield.dbpedia-entity.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-dbpedia-entity.splade-pp-ed \
  --topics beir-v1.0.0-dbpedia-entity.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.dbpedia-entity.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
  run.beir.splade-pp-ed.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
  run.beir.splade-pp-ed.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
  run.beir.splade-pp-ed.dbpedia-entity.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-dbpedia-entity.contriever-msmarco \
  --topics beir-v1.0.0-dbpedia-entity-test \
  --output run.beir.contriever-msmarco.dbpedia-entity.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
  run.beir.contriever-msmarco.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
  run.beir.contriever-msmarco.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
  run.beir.contriever-msmarco.dbpedia-entity.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-dbpedia-entity.bge-base-en-v1.5 \
  --topics beir-v1.0.0-dbpedia-entity-test \
  --output run.beir.bge-base-en-v1.5.dbpedia-entity.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-dbpedia-entity-test \
  run.beir.bge-base-en-v1.5.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-dbpedia-entity-test \
  run.beir.bge-base-en-v1.5.dbpedia-entity.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-dbpedia-entity-test \
  run.beir.bge-base-en-v1.5.dbpedia-entity.txt
| scidocs | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.1490 | 0.3477 |
| BM25 (multifield) | 0.1581 | 0.3561 |
| SPLADE++ ED | 0.1591 | 0.3730 |
| Contriever (MS MARCO) | 0.1652 | 0.3783 |
| BGE-base-en-v1.5 | 0.2172 | 0.4959 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-scidocs.flat \
  --topics beir-v1.0.0-scidocs-test \
  --output run.beir.bm25-flat.scidocs.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
  run.beir.bm25-flat.scidocs.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scidocs-test \
  run.beir.bm25-flat.scidocs.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scidocs-test \
  run.beir.bm25-flat.scidocs.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-scidocs.multifield \
  --topics beir-v1.0.0-scidocs-test \
  --output run.beir.bm25-multifield.scidocs.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
  run.beir.bm25-multifield.scidocs.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scidocs-test \
  run.beir.bm25-multifield.scidocs.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scidocs-test \
  run.beir.bm25-multifield.scidocs.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-scidocs.splade-pp-ed \
  --topics beir-v1.0.0-scidocs.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.scidocs.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
  run.beir.splade-pp-ed.scidocs.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scidocs-test \
  run.beir.splade-pp-ed.scidocs.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scidocs-test \
  run.beir.splade-pp-ed.scidocs.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-scidocs.contriever-msmarco \
  --topics beir-v1.0.0-scidocs-test \
  --output run.beir.contriever-msmarco.scidocs.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
  run.beir.contriever-msmarco.scidocs.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scidocs-test \
  run.beir.contriever-msmarco.scidocs.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scidocs-test \
  run.beir.contriever-msmarco.scidocs.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-scidocs.bge-base-en-v1.5 \
  --topics beir-v1.0.0-scidocs-test \
  --output run.beir.bge-base-en-v1.5.scidocs.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scidocs-test \
  run.beir.bge-base-en-v1.5.scidocs.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scidocs-test \
  run.beir.bge-base-en-v1.5.scidocs.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scidocs-test \
  run.beir.bge-base-en-v1.5.scidocs.txt
| fever | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.6513 | 0.9185 |
| BM25 (multifield) | 0.7530 | 0.9309 |
| SPLADE++ ED | 0.7882 | 0.9459 |
| Contriever (MS MARCO) | 0.7583 | 0.9494 |
| BGE-base-en-v1.5 | 0.8629 | 0.9719 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-fever.flat \
  --topics beir-v1.0.0-fever-test \
  --output run.beir.bm25-flat.fever.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir.bm25-flat.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir.bm25-flat.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir.bm25-flat.fever.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-fever.multifield \
  --topics beir-v1.0.0-fever-test \
  --output run.beir.bm25-multifield.fever.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir.bm25-multifield.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir.bm25-multifield.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir.bm25-multifield.fever.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-fever.splade-pp-ed \
  --topics beir-v1.0.0-fever.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.fever.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir.splade-pp-ed.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir.splade-pp-ed.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir.splade-pp-ed.fever.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-fever.contriever-msmarco \
  --topics beir-v1.0.0-fever-test \
  --output run.beir.contriever-msmarco.fever.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir.contriever-msmarco.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir.contriever-msmarco.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir.contriever-msmarco.fever.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-fever.bge-base-en-v1.5 \
  --topics beir-v1.0.0-fever-test \
  --output run.beir.bge-base-en-v1.5.fever.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-fever-test \
  run.beir.bge-base-en-v1.5.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-fever-test \
  run.beir.bge-base-en-v1.5.fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-fever-test \
  run.beir.bge-base-en-v1.5.fever.txt
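
Climate-FEVER, which follows, is built over essentially the same Wikipedia-derived collection as FEVER in BEIR; the claims and relevance judgments differ, and Pyserini ships separate prebuilt indexes under the climate-fever keys.
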
| climate-fever | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.1651 | 0.4249 |
| BM25 (multifield) | 0.2129 | 0.4357 |
| SPLADE++ ED | 0.2297 | 0.5211 |
| Contriever (MS MARCO) | 0.2371 | 0.5746 |
| BGE-base-en-v1.5 | 0.3122 | 0.6362 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-climate-fever.flat \
  --topics beir-v1.0.0-climate-fever-test \
  --output run.beir.bm25-flat.climate-fever.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir.bm25-flat.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir.bm25-flat.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir.bm25-flat.climate-fever.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-climate-fever.multifield \
  --topics beir-v1.0.0-climate-fever-test \
  --output run.beir.bm25-multifield.climate-fever.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir.bm25-multifield.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir.bm25-multifield.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir.bm25-multifield.climate-fever.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-climate-fever.splade-pp-ed \
  --topics beir-v1.0.0-climate-fever.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.climate-fever.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir.splade-pp-ed.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir.splade-pp-ed.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir.splade-pp-ed.climate-fever.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-climate-fever.contriever-msmarco \
  --topics beir-v1.0.0-climate-fever-test \
  --output run.beir.contriever-msmarco.climate-fever.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir.contriever-msmarco.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir.contriever-msmarco.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir.contriever-msmarco.climate-fever.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-climate-fever.bge-base-en-v1.5 \
  --topics beir-v1.0.0-climate-fever-test \
  --output run.beir.bge-base-en-v1.5.climate-fever.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-climate-fever-test \
  run.beir.bge-base-en-v1.5.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-climate-fever-test \
  run.beir.bge-base-en-v1.5.climate-fever.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-climate-fever-test \
  run.beir.bge-base-en-v1.5.climate-fever.txt
| scifact | nDCG@10 | R@100 |
|:---|---:|---:|
| BM25 (flat) | 0.6789 | 0.9253 |
| BM25 (multifield) | 0.6647 | 0.9076 |
| SPLADE++ ED | 0.7041 | 0.9353 |
| Contriever (MS MARCO) | 0.6768 | 0.9470 |
| BGE-base-en-v1.5 | 0.7376 | 0.9700 |

Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-scifact.flat \
  --topics beir-v1.0.0-scifact-test \
  --output run.beir.bm25-flat.scifact.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir.bm25-flat.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir.bm25-flat.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir.bm25-flat.scifact.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-scifact.multifield \
  --topics beir-v1.0.0-scifact-test \
  --output run.beir.bm25-multifield.scifact.txt \
  --output-format trec \
  --hits 1000 --bm25 --remove-query --fields contents=1.0 title=1.0
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir.bm25-multifield.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir.bm25-multifield.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir.bm25-multifield.scifact.txt
Command to generate run:
   
python -m pyserini.search.lucene \
  --threads 16 --batch-size 128 \
  --index beir-v1.0.0-scifact.splade-pp-ed \
  --topics beir-v1.0.0-scifact.test.splade-pp-ed \
  --output run.beir.splade-pp-ed.scifact.txt \
  --output-format trec \
  --hits 1000 --impact --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir.splade-pp-ed.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir.splade-pp-ed.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir.splade-pp-ed.scifact.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class contriever --encoder facebook/contriever-msmarco \
  --index beir-v1.0.0-scifact.contriever-msmarco \
  --topics beir-v1.0.0-scifact-test \
  --output run.beir.contriever-msmarco.scifact.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir.contriever-msmarco.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir.contriever-msmarco.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir.contriever-msmarco.scifact.txt
Command to generate run:
   
python -m pyserini.search.faiss \
  --threads 16 --batch-size 512 \
  --encoder-class auto --encoder BAAI/bge-base-en-v1.5 --l2-norm \
  --query-prefix "Represent this sentence for searching relevant passages:" \
  --index beir-v1.0.0-scifact.bge-base-en-v1.5 \
  --topics beir-v1.0.0-scifact-test \
  --output run.beir.bge-base-en-v1.5.scifact.txt \
  --hits 1000 --remove-query
 
Evaluation commands:
   
python -m pyserini.eval.trec_eval \
  -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
  run.beir.bge-base-en-v1.5.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.100 beir-v1.0.0-scifact-test \
  run.beir.bge-base-en-v1.5.scifact.txt
python -m pyserini.eval.trec_eval \
  -c -m recall.1000 beir-v1.0.0-scifact-test \
  run.beir.bge-base-en-v1.5.scifact.txt
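
With all runs in place, a per-model summary can be regenerated directly from trec_eval output. A sketch for SciFact (the loop is our own; the awk filter keys on trec_eval's ndcg_cut_10 line, whose third field is the score):

for model in bm25-flat bm25-multifield splade-pp-ed \
             contriever-msmarco bge-base-en-v1.5; do
  score=$(python -m pyserini.eval.trec_eval \
    -c -m ndcg_cut.10 beir-v1.0.0-scifact-test \
    run.beir.${model}.scifact.txt | awk '$1 == "ndcg_cut_10" {print $3}')
  echo "${model} ${score}"
done
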