Samoed committed 97bd0d8 (verified) · 1 parent: 7cd3f36

Add dataset card

Files changed (1): README.md (+19 −19)

README.md CHANGED
@@ -54,25 +54,25 @@ If you use this dataset, please cite the dataset as well as [mteb](https://githu
 ```bibtex
 
 @inproceedings{cohan-etal-2020-specter,
-  title = "{SPECTER}: Document-level Representation Learning using Citation-informed Transformers",
-  author = "Cohan, Arman and
-    Feldman, Sergey and
-    Beltagy, Iz and
-    Downey, Doug and
-    Weld, Daniel",
-  editor = "Jurafsky, Dan and
-    Chai, Joyce and
-    Schluter, Natalie and
-    Tetreault, Joel",
-  booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
-  month = jul,
-  year = "2020",
-  address = "Online",
-  publisher = "Association for Computational Linguistics",
-  url = "https://aclanthology.org/2020.acl-main.207",
-  doi = "10.18653/v1/2020.acl-main.207",
-  pages = "2270--2282",
-  abstract = "Representation learning is a critical ingredient for natural language processing systems. Recent Transformer language models like BERT learn powerful textual representations, but these models are targeted towards token- and sentence-level training objectives and do not leverage information on inter-document relatedness, which limits their document-level representation power. For applications on scientific documents, such as classification and recommendation, accurate embeddings of documents are a necessity. We propose SPECTER, a new method to generate document-level embedding of scientific papers based on pretraining a Transformer language model on a powerful signal of document-level relatedness: the citation graph. Unlike existing pretrained language models, Specter can be easily applied to downstream applications without task-specific fine-tuning. Additionally, to encourage further research on document-level models, we introduce SciDocs, a new evaluation benchmark consisting of seven document-level tasks ranging from citation prediction, to document classification and recommendation. We show that Specter outperforms a variety of competitive baselines on the benchmark.",
+  abstract = {Representation learning is a critical ingredient for natural language processing systems. Recent Transformer language models like BERT learn powerful textual representations, but these models are targeted towards token- and sentence-level training objectives and do not leverage information on inter-document relatedness, which limits their document-level representation power. For applications on scientific documents, such as classification and recommendation, accurate embeddings of documents are a necessity. We propose SPECTER, a new method to generate document-level embedding of scientific papers based on pretraining a Transformer language model on a powerful signal of document-level relatedness: the citation graph. Unlike existing pretrained language models, Specter can be easily applied to downstream applications without task-specific fine-tuning. Additionally, to encourage further research on document-level models, we introduce SciDocs, a new evaluation benchmark consisting of seven document-level tasks ranging from citation prediction, to document classification and recommendation. We show that Specter outperforms a variety of competitive baselines on the benchmark.},
+  address = {Online},
+  author = {Cohan, Arman and
+    Feldman, Sergey and
+    Beltagy, Iz and
+    Downey, Doug and
+    Weld, Daniel},
+  booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
+  doi = {10.18653/v1/2020.acl-main.207},
+  editor = {Jurafsky, Dan and
+    Chai, Joyce and
+    Schluter, Natalie and
+    Tetreault, Joel},
+  month = jul,
+  pages = {2270--2282},
+  publisher = {Association for Computational Linguistics},
+  title = {{SPECTER}: Document-level Representation Learning using Citation-informed Transformers},
+  url = {https://aclanthology.org/2020.acl-main.207},
+  year = {2020},
 }
 
 