library(tidyverse)
library(rvest)
library(robotstxt)
AE 11: Scraping multiple pages of articles from the Cornell Review
Suggested answers
Packages
We will use the following packages in this application exercise.
- tidyverse: For data import, wrangling, and visualization.
- rvest: For scraping HTML files.
- lubridate: For formatting date variables.
- robotstxt: For verifying if we can scrape a website.
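For instance, mdy() from lubridate, which we use in the cleaning step below, parses a month-day-year string into a proper Date. A minimal illustration (the input string here is a hypothetical byline date, not scraped data):

mdy("March 9, 2024")
#> [1] "2024-03-09"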
Part 1 - Data scraping
See the code below, stored in iterate-cornell-review.R.
# load packages
library(tidyverse)
library(rvest)
library(robotstxt)
# check that we can scrape data from the cornell review
paths_allowed("https://www.thecornellreview.org/")
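
# a supplementary check (an added sketch, not in the original script):
# paths_allowed() also accepts specific paths, e.g. the paginated archive
# pages we scrape below
paths_allowed(paths = "/page/2/", domain = "thecornellreview.org")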
# read the first page
page <- read_html("https://www.thecornellreview.org/")

# extract desired components
titles <- html_elements(x = page, css = "#main .read-title a") |>
  html_text2()
authors <- html_elements(x = page, css = "#main .byline a") |>
  html_text2()
article_dates <- html_elements(x = page, css = "#main .posts-date") |>
  html_text2()
topics <- html_elements(x = page, css = "#main .cat-links") |>
  html_text2()
abstracts <- html_elements(x = page, css = ".post-description") |>
  html_text2()
post_urls <- html_elements(x = page, css = ".aft-readmore") |>
  html_attr(name = "href")

# create a tibble with this data
review_raw <- tibble(
  title = titles,
  author = authors,
  date = article_dates,
  topic = topics,
  description = abstracts,
  url = post_urls
)

# clean up the data
review <- review_raw |>
  mutate(
    date = mdy(date),
    description = str_remove(string = description, pattern = "\nRead More")
  )
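
# sanity check (an added aside, not in the original script): tibble()
# above only works because all six extracted vectors have equal length;
# lengths() makes a selector mismatch easy to spot
lengths(list(titles, authors, article_dates, topics, abstracts, post_urls))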
######## write a for loop to scrape the first 10 pages
scrape_results <- vector(mode = "list", length = 10)
for (page_num in seq_along(scrape_results)) {
  # print a message to keep track of where we are in the iteration
  message(str_glue("Scraping page {page_num}"))

  # pause for a couple of seconds to prevent rapid HTTP requests
  Sys.sleep(2)

  # create the URL
  url <- str_glue("https://www.thecornellreview.org/page/{page_num}/")

  # read the page
  page <- read_html(url)

  # extract desired components
  titles <- html_elements(x = page, css = "#main .read-title a") |>
    html_text2()
  authors <- html_elements(x = page, css = "#main .byline a") |>
    html_text2()
  article_dates <- html_elements(x = page, css = "#main .posts-date") |>
    html_text2()
  topics <- html_elements(x = page, css = "#main .cat-links") |>
    html_text2()
  abstracts <- html_elements(x = page, css = ".post-description") |>
    html_text2()
  post_urls <- html_elements(x = page, css = ".aft-readmore") |>
    html_attr(name = "href")

  # create a tibble with this data
  review_raw <- tibble(
    title = titles,
    author = authors,
    date = article_dates,
    topic = topics,
    description = abstracts,
    url = post_urls
  )

  # clean up the data
  review <- review_raw |>
    mutate(
      date = mdy(date),
      description = str_remove(string = description, pattern = "\nRead More")
    )

  # store in list output
  scrape_results[[page_num]] <- review
}
# collapse list of data frames to a single data frame
scrape_df <- list_rbind(x = scrape_results)
######## write a function to scrape a single page and use a map() function
######## to iterate over the first ten pages
# convert to a function
scrape_review <- function(url) {
  # pause for a couple of seconds to prevent rapid HTTP requests
  Sys.sleep(2)

  # read the page
  page <- read_html(url)

  # extract desired components
  titles <- html_elements(x = page, css = "#main .read-title a") |>
    html_text2()
  authors <- html_elements(x = page, css = "#main .byline a") |>
    html_text2()
  article_dates <- html_elements(x = page, css = "#main .posts-date") |>
    html_text2()
  topics <- html_elements(x = page, css = "#main .cat-links") |>
    html_text2()
  abstracts <- html_elements(x = page, css = ".post-description") |>
    html_text2()
  post_urls <- html_elements(x = page, css = ".aft-readmore") |>
    html_attr(name = "href")

  # create a tibble with this data
  review_raw <- tibble(
    title = titles,
    author = authors,
    date = article_dates,
    topic = topics,
    description = abstracts,
    url = post_urls
  )

  # clean up the data
  review <- review_raw |>
    mutate(
      date = mdy(date),
      description = str_remove(string = description, pattern = "\nRead More")
    )

  # export the resulting data frame
  return(review)
}
# test function
## page 1
scrape_review(url = "https://www.thecornellreview.org/page/1/")
## page 2
scrape_review(url = "https://www.thecornellreview.org/page/2/")
## page 3
scrape_review(url = "https://www.thecornellreview.org/page/3/")
# create a vector of URLs
page_nums <- 1:10
cr_urls <- str_glue("https://www.thecornellreview.org/page/{page_nums}/")
cr_urls
# map function over URLs
cr_reviews <- map(.x = cr_urls, .f = scrape_review, .progress = TRUE) |>
  list_rbind()
# write data
write_csv(x = cr_reviews, file = "data/cornell-review-all.csv")
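
# robustness aside (an added sketch, not part of the original exercise):
# if any single page fails to download, map() aborts the whole run.
# Wrapping the scraper in purrr::possibly() substitutes NULL for failed
# pages instead, and list_rbind() ignores NULL elements.
scrape_review_safely <- possibly(.f = scrape_review, otherwise = NULL)
cr_reviews_safe <- map(.x = cr_urls, .f = scrape_review_safely, .progress = TRUE) |>
  list_rbind()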
Part 2 - Data analysis
Demo: Import the scraped data set.
cr_reviews <- read_csv(file = "data/cornell-review-all.csv")
Rows: 100 Columns: 6
── Column specification ────────────────────────────────────────────────────────
Delimiter: ","
chr (5): title, author, topic, description, url
date (1): date
ℹ Use `spec()` to retrieve the full column specification for this data.
ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
cr_reviews
# A tibble: 100 × 6
title author date topic description url
<chr> <chr> <date> <chr> <chr> <chr>
1 OPINION | Return Conduct Rules and… Revie… 2024-03-09 Opin… "Cornell's… http…
2 Lecture Examines the State of Free… Revie… 2024-03-07 Camp… "On Februa… http…
3 The Futility of the CML Casey… 2024-03-07 Camp… "The CML’s… http…
4 Authors of Canceling of the Americ… Revie… 2024-03-04 Unca… "On March … http…
5 Kimberlé Crenshaw Delivers MLK Lec… Revie… 2024-03-01 Camp… "On Monday… http…
6 Cornellians entrust algorithm with… Eben … 2024-02-15 Camp… "Over 5,00… http…
7 Seven fraternities temporarily sus… Revie… 2024-02-15 Camp… "Seven fra… http…
8 HEARD AT CORNELL | The ‘occupation… Revie… 2024-02-14 Camp… "Heard at … http…
9 Tompkins County Legislature Reject… Revie… 2024-02-08 Beyo… "On Februa… http…
10 Kimberlé Crenshaw To Give Martin L… Revie… 2024-02-07 Camp… "Alumna Ki… http…
# ℹ 90 more rows
Demo: Who are the most prolific authors?
cr_reviews |>
  # adjust order of authors so they appear from most to least frequent
  mutate(author = fct_infreq(f = author) |>
           fct_rev()) |>
  # horizontal bar chart
  ggplot(mapping = aes(y = author)) +
  geom_bar()
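To read the exact figures behind the bars, a count() table works too (an added sketch, not part of the original demo):

cr_reviews |>
  count(author, sort = TRUE)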
Demo: What topics does The Cornell Review write about?
# basic bar plot
ggplot(data = cr_reviews, mapping = aes(y = topic)) +
geom_bar()
Not super helpful. Each article can have multiple topics. What is the syntax for this column?
cr_reviews |>
  select(topic) |>
  filter(str_detect(string = topic, pattern = "\n"))
# A tibble: 8 × 1
topic
<chr>
1 "Beyond Cayuga's Waters\nCampus"
2 "Culture\nOpinion"
3 "Campus\nCornell Politics"
4 "Campus\nOpinion"
5 "Culture\nOpinion"
6 "Beyond Cayuga's Waters\nNew York"
7 "Campus\nEditorial"
8 "Opinion\nSpring 2023 Edition"
Articles with multiple topics are separated by a "\n". Since the number of topics varies for each article, we cannot split the column into a fixed set of new columns. Instead we can use separate_longer_delim() to split each topic into its own row.
cr_reviews |>
  separate_longer_delim(
    cols = topic,
    delim = "\n"
  )
# A tibble: 108 × 6
title author date topic description url
<chr> <chr> <date> <chr> <chr> <chr>
1 OPINION | Return Conduct Rules and… Revie… 2024-03-09 Opin… "Cornell's… http…
2 Lecture Examines the State of Free… Revie… 2024-03-07 Camp… "On Februa… http…
3 The Futility of the CML Casey… 2024-03-07 Camp… "The CML’s… http…
4 Authors of Canceling of the Americ… Revie… 2024-03-04 Unca… "On March … http…
5 Kimberlé Crenshaw Delivers MLK Lec… Revie… 2024-03-01 Camp… "On Monday… http…
6 Cornellians entrust algorithm with… Eben … 2024-02-15 Camp… "Over 5,00… http…
7 Seven fraternities temporarily sus… Revie… 2024-02-15 Camp… "Seven fra… http…
8 HEARD AT CORNELL | The ‘occupation… Revie… 2024-02-14 Camp… "Heard at … http…
9 Tompkins County Legislature Reject… Revie… 2024-02-08 Beyo… "On Februa… http…
10 Kimberlé Crenshaw To Give Martin L… Revie… 2024-02-07 Camp… "Alumna Ki… http…
# ℹ 98 more rows
Notice the data frame now has additional rows. The unit of analysis is now an article-topic combination rather than one row per article. This is not an entirely tidy structure, but it is necessary to construct a chart that visualizes topic frequency.
cr_reviews |>
  separate_longer_delim(
    cols = topic,
    delim = "\n"
  ) |>
  ggplot(mapping = aes(y = topic)) +
  geom_bar()
Let’s clean this up like the previous chart.
cr_reviews |>
  separate_longer_delim(
    cols = topic,
    delim = "\n"
  ) |>
  mutate(topic = fct_infreq(f = topic) |>
           fct_rev()) |>
  ggplot(mapping = aes(y = topic)) +
  geom_bar()
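For a presentation-ready version, informative labels help (an added sketch, not part of the suggested answer; the label text is my own wording):

cr_reviews |>
  separate_longer_delim(
    cols = topic,
    delim = "\n"
  ) |>
  mutate(topic = fct_infreq(f = topic) |>
           fct_rev()) |>
  ggplot(mapping = aes(y = topic)) +
  geom_bar() +
  labs(
    x = "Number of articles",
    y = NULL,
    title = "Topics covered by The Cornell Review"
  )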
sessioninfo::session_info()
─ Session info ───────────────────────────────────────────────────────────────
setting value
version R version 4.3.2 (2023-10-31)
os macOS Ventura 13.5.2
system aarch64, darwin20
ui X11
language (EN)
collate en_US.UTF-8
ctype en_US.UTF-8
tz America/New_York
date 2024-03-10
pandoc 3.1.1 @ /Applications/RStudio.app/Contents/Resources/app/quarto/bin/tools/ (via rmarkdown)
─ Packages ───────────────────────────────────────────────────────────────────
package * version date (UTC) lib source
bit 4.0.5 2022-11-15 [1] CRAN (R 4.3.0)
bit64 4.0.5 2020-08-30 [1] CRAN (R 4.3.0)
cli 3.6.2 2023-12-11 [1] CRAN (R 4.3.1)
colorspace 2.1-0 2023-01-23 [1] CRAN (R 4.3.0)
crayon 1.5.2 2022-09-29 [1] CRAN (R 4.3.0)
digest 0.6.34 2024-01-11 [1] CRAN (R 4.3.1)
dplyr * 1.1.4 2023-11-17 [1] CRAN (R 4.3.1)
evaluate 0.23 2023-11-01 [1] CRAN (R 4.3.1)
fansi 1.0.6 2023-12-08 [1] CRAN (R 4.3.1)
farver 2.1.1 2022-07-06 [1] CRAN (R 4.3.0)
fastmap 1.1.1 2023-02-24 [1] CRAN (R 4.3.0)
forcats * 1.0.0 2023-01-29 [1] CRAN (R 4.3.0)
generics 0.1.3 2022-07-05 [1] CRAN (R 4.3.0)
ggplot2 * 3.4.4 2023-10-12 [1] CRAN (R 4.3.1)
glue 1.7.0 2024-01-09 [1] CRAN (R 4.3.1)
gtable 0.3.4 2023-08-21 [1] CRAN (R 4.3.0)
here 1.0.1 2020-12-13 [1] CRAN (R 4.3.0)
hms 1.1.3 2023-03-21 [1] CRAN (R 4.3.0)
htmltools 0.5.7 2023-11-03 [1] CRAN (R 4.3.1)
htmlwidgets 1.6.4 2023-12-06 [1] CRAN (R 4.3.1)
httr 1.4.7 2023-08-15 [1] CRAN (R 4.3.0)
jsonlite 1.8.8 2023-12-04 [1] CRAN (R 4.3.1)
knitr 1.45 2023-10-30 [1] CRAN (R 4.3.1)
labeling 0.4.3 2023-08-29 [1] CRAN (R 4.3.0)
lifecycle 1.0.4 2023-11-07 [1] CRAN (R 4.3.1)
lubridate * 1.9.3 2023-09-27 [1] CRAN (R 4.3.1)
magrittr 2.0.3 2022-03-30 [1] CRAN (R 4.3.0)
munsell 0.5.0 2018-06-12 [1] CRAN (R 4.3.0)
pillar 1.9.0 2023-03-22 [1] CRAN (R 4.3.0)
pkgconfig 2.0.3 2019-09-22 [1] CRAN (R 4.3.0)
purrr * 1.0.2 2023-08-10 [1] CRAN (R 4.3.0)
R6 2.5.1 2021-08-19 [1] CRAN (R 4.3.0)
readr * 2.1.5 2024-01-10 [1] CRAN (R 4.3.1)
rlang 1.1.3 2024-01-10 [1] CRAN (R 4.3.1)
rmarkdown 2.25 2023-09-18 [1] CRAN (R 4.3.1)
robotstxt * 0.7.13 2020-09-03 [1] CRAN (R 4.3.0)
rprojroot 2.0.4 2023-11-05 [1] CRAN (R 4.3.1)
rstudioapi 0.15.0 2023-07-07 [1] CRAN (R 4.3.0)
rvest * 1.0.3 2022-08-19 [1] CRAN (R 4.3.0)
scales 1.2.1 2024-01-18 [1] Github (r-lib/scales@c8eb772)
sessioninfo 1.2.2 2021-12-06 [1] CRAN (R 4.3.0)
stringi 1.8.3 2023-12-11 [1] CRAN (R 4.3.1)
stringr * 1.5.1 2023-11-14 [1] CRAN (R 4.3.1)
tibble * 3.2.1 2023-03-20 [1] CRAN (R 4.3.0)
tidyr * 1.3.0 2023-01-24 [1] CRAN (R 4.3.0)
tidyselect 1.2.0 2022-10-10 [1] CRAN (R 4.3.0)
tidyverse * 2.0.0 2023-02-22 [1] CRAN (R 4.3.0)
timechange 0.2.0 2023-01-11 [1] CRAN (R 4.3.0)
tzdb 0.4.0 2023-05-12 [1] CRAN (R 4.3.0)
utf8 1.2.4 2023-10-22 [1] CRAN (R 4.3.1)
vctrs 0.6.5 2023-12-01 [1] CRAN (R 4.3.1)
vroom 1.6.5 2023-12-05 [1] CRAN (R 4.3.1)
withr 2.5.2 2023-10-30 [1] CRAN (R 4.3.1)
xfun 0.41 2023-11-01 [1] CRAN (R 4.3.1)
xml2 1.3.6 2023-12-04 [1] CRAN (R 4.3.1)
yaml 2.3.8 2023-12-11 [1] CRAN (R 4.3.1)
[1] /Library/Frameworks/R.framework/Versions/4.3-arm64/Resources/library
──────────────────────────────────────────────────────────────────────────────