@inproceedings{harrison-etal-2023-run,
title = "Run Like a Girl! Sport-Related Gender Bias in Language and Vision",
author = "Harrison, Sophia and
Gualdoni, Eleonora and
Boleda, Gemma",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-acl.886",
doi = "10.18653/v1/2023.findings-acl.886",
pages = "14093--14103",
abstract = "Gender bias in Language and Vision datasets and models has the potential to perpetuate harmful stereotypes and discrimination. We analyze gender bias in two Language and Vision datasets. Consistent with prior work, we find that both datasets underrepresent women, which promotes their invisibilization. Moreover, we hypothesize and find that a bias affects human naming choices for people playing sports: speakers produce names indicating the sport (e.g. {``}tennis player{''} or {``}surfer{''}) more often when it is a man or a boy participating in the sport than when it is a woman or a girl, with an average of 46{\%} vs. 35{\%} of sports-related names for each gender. A computational model trained on these naming data reproduces thebias. We argue that both the data and the model result in representational harm against women.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="harrison-etal-2023-run">
    <titleInfo>
      <title>Run Like a Girl! Sport-Related Gender Bias in Language and Vision</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Sophia</namePart>
      <namePart type="family">Harrison</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Eleonora</namePart>
      <namePart type="family">Gualdoni</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Gemma</namePart>
      <namePart type="family">Boleda</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2023-07</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: ACL 2023</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Anna</namePart>
        <namePart type="family">Rogers</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Jordan</namePart>
        <namePart type="family">Boyd-Graber</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Naoaki</namePart>
        <namePart type="family">Okazaki</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Toronto, Canada</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Gender bias in Language and Vision datasets and models has the potential to perpetuate harmful stereotypes and discrimination. We analyze gender bias in two Language and Vision datasets. Consistent with prior work, we find that both datasets underrepresent women, which promotes their invisibilization. Moreover, we hypothesize and find that a bias affects human naming choices for people playing sports: speakers produce names indicating the sport (e.g. “tennis player” or “surfer”) more often when it is a man or a boy participating in the sport than when it is a woman or a girl, with an average of 46% vs. 35% of sports-related names for each gender. A computational model trained on these naming data reproduces the bias. We argue that both the data and the model result in representational harm against women.</abstract>
<identifier type="citekey">harrison-etal-2023-run</identifier>
<identifier type="doi">10.18653/v1/2023.findings-acl.886</identifier>
<location>
<url>https://aclanthology.org/2023.findings-acl.886</url>
</location>
<part>
<date>2023-07</date>
<extent unit="page">
<start>14093</start>
<end>14103</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Run Like a Girl! Sport-Related Gender Bias in Language and Vision
%A Harrison, Sophia
%A Gualdoni, Eleonora
%A Boleda, Gemma
%Y Rogers, Anna
%Y Boyd-Graber, Jordan
%Y Okazaki, Naoaki
%S Findings of the Association for Computational Linguistics: ACL 2023
%D 2023
%8 July
%I Association for Computational Linguistics
%C Toronto, Canada
%F harrison-etal-2023-run
%X Gender bias in Language and Vision datasets and models has the potential to perpetuate harmful stereotypes and discrimination. We analyze gender bias in two Language and Vision datasets. Consistent with prior work, we find that both datasets underrepresent women, which promotes their invisibilization. Moreover, we hypothesize and find that a bias affects human naming choices for people playing sports: speakers produce names indicating the sport (e.g. “tennis player” or “surfer”) more often when it is a man or a boy participating in the sport than when it is a woman or a girl, with an average of 46% vs. 35% of sports-related names for each gender. A computational model trained on these naming data reproduces the bias. We argue that both the data and the model result in representational harm against women.
%R 10.18653/v1/2023.findings-acl.886
%U https://aclanthology.org/2023.findings-acl.886
%U https://doi.org/10.18653/v1/2023.findings-acl.886
%P 14093-14103
Markdown (Informal)
[Run Like a Girl! Sport-Related Gender Bias in Language and Vision](https://aclanthology.org/2023.findings-acl.886) (Harrison et al., Findings 2023)
ACL
Sophia Harrison, Eleonora Gualdoni, and Gemma Boleda. 2023. [Run Like a Girl! Sport-Related Gender Bias in Language and Vision](https://aclanthology.org/2023.findings-acl.886). In *Findings of the Association for Computational Linguistics: ACL 2023*, pages 14093–14103, Toronto, Canada. Association for Computational Linguistics.