
Commit 7cc6f12 ("emnlp")

1 parent: 6662d9e

2 files changed: +20, -8 lines


_bibliography/pubs.bib

Lines changed: 17 additions & 8 deletions
@@ -1,24 +1,33 @@
----
----
+%% This BibTeX bibliography file was created using BibDesk.
+%% http://bibdesk.sourceforge.net/
+
+%% Created for j at 2021-09-10 10:36:49 -0400
+
+
+%% Saved with string encoding Unicode (UTF-8)
+
+
 
 @inproceedings{hoover.j:2020,
 abstract = {Icelandic dative-nominative constructions exhibit a syntactic hierarchy effect known as the Person Restriction: only third person nominatives may control agreement. In these constructions, there is variation between speakers in the extent to which the verb agrees with the nominative for number. Sigurðsson & Holmberg (2008) explain this variation as arising due to differences between varieties in the timing of subject raising, using a split phi-probe. This paper revises their approach, using the feature gluttony mechanism for Agree developed in Coon & Keine (2020), and a split phi-probe in which person probing precedes number probing. Within this framework, the observed variation can be captured by allowing variability two independent parameters: the timing of EPP subject raising, and the visibility of a number feature on dative DPs. The proposed mechanism describes the variation, including predicting the observed optional agreement in certain cases that previous literature had struggled to account for, and makes additional predictions about the differences between varieties in cases of syncretism within the verbal paradigm. An investigation into these predictions should allow this already well-studied area of Icelandic grammar to continue to be a useful test-case for crosslinguistic assumptions about the mechanism of Agree, and the status of dative arguments.},
 address = {Somerville, Mass., USA},
 author = {Jacob Louis Hoover},
 booktitle = {Proceedings of the 38th West Coast Conference on Formal Linguistics},
 editor = {Daniel Reisinger and Rachel Soo},
+handout = {http://doi.org/10.14288/1.0389856},
+pdf = {wccfl2020-icelandic_gluttony-proceedings.pdf},
 publisher = {Cascadilla Proceedings Project},
 pubstate = {forthcoming},
 title = {Accounting for variation in number agreement in Icelandic dative--nominative constructions},
-year = {2020},
-pdf = {wccfl2020-icelandic_gluttony-proceedings.pdf},
-handout= {http://doi.org/10.14288/1.0389856}}
+year = {2020}}
 
 @misc{hoover.j:2021,
-abstract = {What is the relationship between linguistic dependencies and statistical dependence? Building on earlier work in NLP and cognitive science, we study this question. We introduce a contextualized version of pointwise mutual information (CPMI), using pretrained language models to estimate probabilities of words in context. Extracting dependency trees which maximize CPMI, we compare the resulting structures against gold dependencies. Overall, we find that these maximum-CPMI trees correspond to linguistic dependencies more often than trees extracted from non-contextual PMI estimate, but only roughly as often as a simple baseline formed by connecting adjacent words. We also provide evidence that the extent to which the two kinds of dependency align cannot be explained by the distance between words or by the category of the dependency relation. Finally, our analysis sheds some light on the differences between large pretrained language models, specifically in the kinds of inductive biases they encode.},
+abstract = {Are pairs of words that tend to occur together also likely to stand in a linguistic dependency? This empirical question is motivated by a long history of literature in cognitive science, psycholinguistics, and NLP. In this work we contribute an extensive analysis of the relationship between linguistic dependencies and statistical dependence between words. Improving on previous work, we introduce the use of large pretrained language models to compute contextualized estimates of the pointwise mutual information between words (CPMI). For multiple models and languages, we extract dependency trees which maximize CPMI, and compare to gold standard linguistic dependencies. Overall, we find that CPMI dependencies achieve an unlabelled undirected attachment score of at most $\approx 0.5$. While far above chance, and consistently above a non-contextualized PMI baseline, this score is generally comparable to a simple baseline formed by connecting adjacent words. We analyze which kinds of linguistic dependencies are best captured in CPMI dependencies, and also find marked differences between the estimates of the large pretrained language models, illustrating how their different training schemes affect the type of dependencies they capture.},
 archiveprefix = {arXiv},
 author = {Jacob Louis Hoover and Alessandro Sordoni and Wenyu Du and Timothy J. O'Donnell},
+date-modified = {2021-09-10 10:36:49 -0400},
 eprint = {2104.08685},
+note = {Accepted to EMNLP2021},
 primaryclass = {cs.CL},
-title = {Linguistic dependencies and statistical dependence},
-year = {2021}}
+title = {Linguistic Dependencies and Statistical Dependence},
+year = {2021}}

_layouts/bib.html

Lines changed: 3 additions & 0 deletions
@@ -36,6 +36,9 @@
 {% elsif entry.howpublished %}
 {{entry.howpublished}}.
 {% endif %}
+{% if entry.note %}
+{{entry.note}}.
+{% endif %}
 {% if entry.day %}
 {{entry.day}}
 {% endif %}
