Added some citations
parent e6006cb6d5
commit b28d9ead68

@@ -45,3 +45,35 @@
 year = {2023},
 note = {[Accessed 18-11-2023]},
 }
+
+
+@article{Demeter_Downey_2020,
+  title={Just Add Functions: A Neural-Symbolic Language Model},
+  volume={34},
+  url={https://ojs.aaai.org/index.php/AAAI/article/view/6264},
+  DOI={10.1609/aaai.v34i05.6264},
+  abstractNote={Neural network language models (NNLMs) have achieved ever-improving accuracy due to more sophisticated architectures and increasing amounts of training data. However, the inductive bias of these models (formed by the distributional hypothesis of language), while ideally suited to modeling most running text, results in key limitations for today’s models. In particular, the models often struggle to learn certain spatial, temporal, or quantitative relationships, which are commonplace in text and are second-nature for human readers. Yet, in many cases, these relationships can be encoded with simple mathematical or logical expressions. How can we augment today’s neural models with such encodings? In this paper, we propose a general methodology to enhance the inductive bias of NNLMs by incorporating simple functions into a neural architecture to form a hierarchical neural-symbolic language model (NSLM). These functions explicitly encode symbolic deterministic relationships to form probability distributions over words. We explore the effectiveness of this approach on numbers and geographic locations, and show that NSLMs significantly reduce perplexity in small-corpus language modeling, and that the performance improvement persists for rare tokens even on much larger corpora. The approach is simple and general, and we discuss how it can be applied to other word classes beyond numbers and geography.},
+  number={05},
+  journal={Proceedings of the AAAI Conference on Artificial Intelligence},
+  author={Demeter, David and Downey, Doug},
+  year={2020}, month={Apr.},
+  pages={7634-7642}
+}
+
+@misc{pi2022reasoning,
+  title={Reasoning Like Program Executors},
+  author={Xinyu Pi and Qian Liu and Bei Chen and Morteza Ziyadi and Zeqi Lin and Qiang Fu and Yan Gao and Jian-Guang Lou and Weizhu Chen},
+  year={2022},
+  eprint={2201.11473},
+  archivePrefix={arXiv},
+  primaryClass={cs.CL}
+}
+
+@misc{chen2023program,
+  title={Program of Thoughts Prompting: Disentangling Computation from Reasoning for Numerical Reasoning Tasks},
+  author={Wenhu Chen and Xueguang Ma and Xinyi Wang and William W. Cohen},
+  year={2023},
+  eprint={2211.12588},
+  archivePrefix={arXiv},
+  primaryClass={cs.CL}
+}