diff --git a/docs/Hacking-Religion--TRS---Data-Science-in-Action.pdf b/docs/Hacking-Religion--TRS---Data-Science-in-Action.pdf
index 199fd32..92b02f2 100644
Binary files a/docs/Hacking-Religion--TRS---Data-Science-in-Action.pdf and b/docs/Hacking-Religion--TRS---Data-Science-in-Action.pdf differ
diff --git a/docs/chapter_1.html b/docs/chapter_1.html
index d6bd28b..d89507c 100644
--- a/docs/chapter_1.html
+++ b/docs/chapter_1.html
@@ -573,7 +573,7 @@ div.csl-indent {
2
-We’ll re-order the column by size.
+We’ll re-order the column by size.
@@ -596,19 +596,19 @@ div.csl-indent {
1
-First, remove the column with region names and the totals for the regions as we want just integer data.
+First, remove the column with region names and the totals for the regions as we want just integer data.
2
-Second calculate the totals. In this example we use the tidyverse library dplyr(), but you can also do this using base R with colsums() like this: uk_census_2021_religion_totals <- colSums(uk_census_2021_religion_totals, na.rm = TRUE). The downside with base R is that you’ll also need to convert the result into a dataframe for ggplot like this: uk_census_2021_religion_totals <- as.data.frame(uk_census_2021_religion_totals)
+Second calculate the totals. In this example we use the tidyverse library dplyr(), but you can also do this using base R with colsums() like this: uk_census_2021_religion_totals <- colSums(uk_census_2021_religion_totals, na.rm = TRUE). The downside with base R is that you’ll also need to convert the result into a dataframe for ggplot like this: uk_census_2021_religion_totals <- as.data.frame(uk_census_2021_religion_totals)
3
-In order to visualise this data using ggplot, we need to shift this data from wide to long format. This is a quick job using gather()
+In order to visualise this data using ggplot, we need to shift this data from wide to long format. This is a quick job using gather()
4
-Now plot it out and have a look!
+Now plot it out and have a look!
@@ -712,48 +712,47 @@ What is Nomis?
# Get table of Census 2011 religion data from nomis
-# Note: for reproducible code used to generate the dataset used in the book, see the cookbook here:
-z <-readRDS(file = (here("example_data", "z.rds")))
-
-# Filter down to simplified dataset with England / Wales and percentages without totals
-uk_census_2011_religion <-filter(z, GEOGRAPHY_NAME=="England and Wales"& RURAL_URBAN_NAME=="Total"& C_RELPUK11_NAME !="All categories: Religion")
-# Drop unnecessary columns
-uk_census_2011_religion <-select(uk_census_2011_religion, C_RELPUK11_NAME, OBS_VALUE)
-# Plot results
-plot1 <-ggplot(uk_census_2011_religion, aes(x = C_RELPUK11_NAME, y = OBS_VALUE)) +geom_bar(stat ="identity") +theme(axis.text.x =element_text(angle =90, vjust =0.5, hjust=1))
-# ggsave(filename = "plot.png", plot = plot1)
-
-# grab daata from nomis for 2001 census religion / ethnicity
-z0 <-readRDS(file = (here("example_data", "z0.rds")))
-
-# select relevant columns
-uk_census_2001_religion_ethnicity <-select(z0, GEOGRAPHY_NAME, C_RELPUK11_NAME, C_ETHHUK11_NAME, OBS_VALUE)
-# Filter down to simplified dataset with England / Wales and percentages without totals
-uk_census_2001_religion_ethnicity <-filter(uk_census_2001_religion_ethnicity, GEOGRAPHY_NAME=="England and Wales"& C_RELPUK11_NAME !="All categories: Religion")
-# Simplify data to only include general totals and omit subcategories
-uk_census_2001_religion_ethnicity <- uk_census_2001_religion_ethnicity %>%filter(grepl('Total', C_ETHHUK11_NAME))
-
-# grab data from nomis for 2011 census religion / ethnicity table
-z1 <-readRDS(file = (here("example_data", "z1.rds")))
-
-# select relevant columns
-uk_census_2011_religion_ethnicity <-select(z1, GEOGRAPHY_NAME, C_RELPUK11_NAME, C_ETHPUK11_NAME, OBS_VALUE)
-# Filter down to simplified dataset with England / Wales and percentages without totals
-uk_census_2011_religion_ethnicity <-filter(uk_census_2011_religion_ethnicity, GEOGRAPHY_NAME=="England and Wales"& C_RELPUK11_NAME !="All categories: Religion"& C_ETHPUK11_NAME !="All categories: Ethnic group")
-# Simplify data to only include general totals and omit subcategories
-uk_census_2011_religion_ethnicity <- uk_census_2011_religion_ethnicity %>%filter(grepl('Total', C_ETHPUK11_NAME))
-
-# grab data from nomis for 2021 census religion / ethnicity table
-z2 <-readRDS(file = (here("example_data", "z2.rds")))
-
-# select relevant columns
-uk_census_2021_religion_ethnicity <-select(z2, GEOGRAPHY_NAME, C2021_RELIGION_10_NAME, C2021_ETH_8_NAME, OBS_VALUE)
-# Filter down to simplified dataset with England / Wales and percentages without totals
-uk_census_2021_religion_ethnicity <-filter(uk_census_2021_religion_ethnicity, GEOGRAPHY_NAME=="England and Wales"& C2021_RELIGION_10_NAME !="Total"& C2021_ETH_8_NAME !="Total")
-# 2021 census includes white sub-groups so we need to omit those so we just have totals:
-uk_census_2021_religion_ethnicity <-filter(uk_census_2021_religion_ethnicity, C2021_ETH_8_NAME !="White: English, Welsh, Scottish, Northern Irish or British"& C2021_ETH_8_NAME !="White: Irish"& C2021_ETH_8_NAME !="White: Gypsy or Irish Traveller, Roma or Other White")
-
-ggplot(uk_census_2011_religion_ethnicity, aes(fill=C_ETHPUK11_NAME, x=C_RELPUK11_NAME, y=OBS_VALUE)) +geom_bar(position="dodge", stat ="identity", colour ="black") +scale_fill_brewer(palette ="Set1") +ggtitle("Religious Affiliation in the 2021 Census of England and Wales") +xlab("") +ylab("") +theme(axis.text.x =element_text(angle =90, vjust =0.5, hjust=1))
+z <-readRDS(file = (here("example_data", "z.rds")))
+
+# Filter down to simplified dataset with England / Wales and percentages without totals
+uk_census_2011_religion <-filter(z, GEOGRAPHY_NAME=="England and Wales"& RURAL_URBAN_NAME=="Total"& C_RELPUK11_NAME !="All categories: Religion")
+# Drop unnecessary columns
+uk_census_2011_religion <-select(uk_census_2011_religion, C_RELPUK11_NAME, OBS_VALUE)
+# Plot results
+plot1 <-ggplot(uk_census_2011_religion, aes(x = C_RELPUK11_NAME, y = OBS_VALUE)) +geom_bar(stat ="identity") +theme(axis.text.x =element_text(angle =90, vjust =0.5, hjust=1))
+# ggsave(filename = "plot.png", plot = plot1)
+
+# grab daata from nomis for 2001 census religion / ethnicity
+z0 <-readRDS(file = (here("example_data", "z0.rds")))
+
+# select relevant columns
+uk_census_2001_religion_ethnicity <-select(z0, GEOGRAPHY_NAME, C_RELPUK11_NAME, C_ETHHUK11_NAME, OBS_VALUE)
+# Filter down to simplified dataset with England / Wales and percentages without totals
+uk_census_2001_religion_ethnicity <-filter(uk_census_2001_religion_ethnicity, GEOGRAPHY_NAME=="England and Wales"& C_RELPUK11_NAME !="All categories: Religion")
+# Simplify data to only include general totals and omit subcategories
+uk_census_2001_religion_ethnicity <- uk_census_2001_religion_ethnicity %>%filter(grepl('Total', C_ETHHUK11_NAME))
+
+# grab data from nomis for 2011 census religion / ethnicity table
+z1 <-readRDS(file = (here("example_data", "z1.rds")))
+
+# select relevant columns
+uk_census_2011_religion_ethnicity <-select(z1, GEOGRAPHY_NAME, C_RELPUK11_NAME, C_ETHPUK11_NAME, OBS_VALUE)
+# Filter down to simplified dataset with England / Wales and percentages without totals
+uk_census_2011_religion_ethnicity <-filter(uk_census_2011_religion_ethnicity, GEOGRAPHY_NAME=="England and Wales"& C_RELPUK11_NAME !="All categories: Religion"& C_ETHPUK11_NAME !="All categories: Ethnic group")
+# Simplify data to only include general totals and omit subcategories
+uk_census_2011_religion_ethnicity <- uk_census_2011_religion_ethnicity %>%filter(grepl('Total', C_ETHPUK11_NAME))
+
+# grab data from nomis for 2021 census religion / ethnicity table
+z2 <-readRDS(file = (here("example_data", "z2.rds")))
+
+# select relevant columns
+uk_census_2021_religion_ethnicity <-select(z2, GEOGRAPHY_NAME, C2021_RELIGION_10_NAME, C2021_ETH_8_NAME, OBS_VALUE)
+# Filter down to simplified dataset with England / Wales and percentages without totals
+uk_census_2021_religion_ethnicity <-filter(uk_census_2021_religion_ethnicity, GEOGRAPHY_NAME=="England and Wales"& C2021_RELIGION_10_NAME !="Total"& C2021_ETH_8_NAME !="Total")
+# 2021 census includes white sub-groups so we need to omit those so we just have totals:
+uk_census_2021_religion_ethnicity <-filter(uk_census_2021_religion_ethnicity, C2021_ETH_8_NAME !="White: English, Welsh, Scottish, Northern Irish or British"& C2021_ETH_8_NAME !="White: Irish"& C2021_ETH_8_NAME !="White: Gypsy or Irish Traveller, Roma or Other White")
+
+ggplot(uk_census_2011_religion_ethnicity, aes(fill=C_ETHPUK11_NAME, x=C_RELPUK11_NAME, y=OBS_VALUE)) +geom_bar(position="dodge", stat ="identity", colour ="black") +scale_fill_brewer(palette ="Set1") +ggtitle("Religious Affiliation in the 2021 Census of England and Wales") +xlab("") +ylab("") +theme(axis.text.x =element_text(angle =90, vjust =0.5, hjust=1))
# R Setup -----------------------------------------------------------------setwd("/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion")
-library(here)
-
-
here() starts at /Users/kidwellj/gits/hacking_religion_textbook
-
-
library(tidyverse)
-
-
-- Attaching core tidyverse packages ------------------------ tidyverse 2.0.0 --
-v dplyr 1.1.3 v readr 2.1.4
-v forcats 1.0.0 v stringr 1.5.0
-v ggplot2 3.4.3 v tibble 3.2.1
-v lubridate 1.9.3 v tidyr 1.3.0
-v purrr 1.0.2
-
-
-
-- Conflicts ------------------------------------------ tidyverse_conflicts() --
-x dplyr::filter() masks stats::filter()
-x dplyr::lag() masks stats::lag()
-i Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
-
-
library(haven) # used for importing SPSS .sav files
-here::i_am("chapter_2.qmd")
+library(here) |>suppressPackageStartupMessages()
+library(tidyverse) |>suppressPackageStartupMessages()
+# used for importing SPSS .sav files
+library(haven) |>suppressPackageStartupMessages()
+here::i_am("chapter_2.qmd")
here() starts at /Users/kidwellj/gits/hacking_religion_textbook/hacking_religion
The first thing to note here is that we’ve drawn in a different type of data file, this time from an .sav file, usully produced by the statistics software package SPSS. This uses a different R Library (I use haven for this). The upside is that in some cases where you have survey data with both a code and a value like “1” is eqivalent to “very much agree” this will preserve both in the R dataframe that is created. Now that you’ve loaded in data, you have a new R dataframe called “climate_experience_data” with a lot of columns with just under 1000 survey responses.
@@ -334,9 +318,9 @@ So who’s religious?
Let’s dive into the data and see how this all works out. We’ll start with the question 56 data, around religious affiliation:
There are few things we need to do here to get the data into initial proper shape. This might be called “cleaning” the data:
@@ -346,31 +330,31 @@ So who’s religious?
If we pause at this point to view the data, you’ll see it’s basically just a long list of survey responses. What we need is a count of each unique response (or factor). This will take a few more steps:
-First we generate new a dataframe with sums per category and
+First we generate new a dataframe with sums per category and
-
2
+
2
-…sort in descending order
+…sort in descending order
-
3
+
3
-Then we add new column with percentages based on the sums you’ve just generated
+Then we add new column with percentages based on the sums you’ve just generated
That should give us a tidy table of results, which you can see if you view the contents of our new religious_affiliation_sums dataframe:
-
head(religious_affiliation_sums)
+
head(religious_affiliation_sums)
# A tibble: 6 x 3
response n perc
@@ -384,13 +368,13 @@ So who’s religious?
-
# make plot
-ggplot(religious_affiliation_sums, aes(x = n, y = response)) +
-geom_col(colour ="white") +
-## add percentage labels
-geom_text(aes(label = perc),
-## make labels left-aligned and white
-hjust =1, nudge_x =-.5, colour ="white", size=3)
+
# make plot
+ggplot(religious_affiliation_sums, aes(x = n, y = response)) +
+geom_col(colour ="white") +
+## add percentage labels
+geom_text(aes(label = perc),
+## make labels left-aligned and white
+hjust =1, nudge_x =-.5, colour ="white", size=3)
@@ -398,20 +382,20 @@ So who’s religious?
I’ve added one feature to our chart that wasn’t in the bar charts in chapter 1, text labels with the actual value on each bar.
You may be thinking about the plots we’ve just finished in chapter 1 and wondering how they compare. Let’s use the same facet approach that we’ve just used to render this data in a subsetted way.
-
# First we need to add in data on ethnic self-identification from our respondents:
-df <-select(climate_experience_data, Q56, Q0)
-religious_affiliation_ethnicity <-as_tibble(as_factor(df))
-names(religious_affiliation_ethnicity) <-c("Religion", "Ethnicity")
-
-religious_affiliation_ethnicity_sums <- religious_affiliation_ethnicity %>%
-group_by(Ethnicity) %>%
- dplyr::count(Religion, sort =TRUE) %>%
- dplyr::mutate(Religion = forcats::fct_rev(forcats::fct_inorder(Religion)))
-
-plot1 <-ggplot(religious_affiliation_ethnicity_sums, aes(x = n, y = Religion)) +
-geom_col(colour ="white") +facet_wrap(~Ethnicity, scales="free_x")
-
-ggsave("chart.png", plot=plot1, width =8, height =10, units=c("in"))
+
# First we need to add in data on ethnic self-identification from our respondents:
+df <-select(climate_experience_data, Q56, Q0)
+religious_affiliation_ethnicity <-as_tibble(as_factor(df))
+names(religious_affiliation_ethnicity) <-c("Religion", "Ethnicity")
+
+religious_affiliation_ethnicity_sums <- religious_affiliation_ethnicity %>%
+group_by(Ethnicity) %>%
+ dplyr::count(Religion, sort =TRUE) %>%
+ dplyr::mutate(Religion = forcats::fct_rev(forcats::fct_inorder(Religion)))
+
+plot1 <-ggplot(religious_affiliation_ethnicity_sums, aes(x = n, y = Religion)) +
+geom_col(colour ="white") +facet_wrap(~Ethnicity, scales="free_x")
+
+ggsave("chart.png", plot=plot1, width =8, height =10, units=c("in"))
Use mutate to put “prefer not to say” at the bottom # Info here: https://r4ds.had.co.nz/factors.html#modifying-factor-levels
-- Attaching core tidyverse packages ------------------------ tidyverse 2.0.0 --
+v dplyr 1.1.3 v readr 2.1.4
+v forcats 1.0.0 v stringr 1.5.0
+v ggplot2 3.4.3 v tibble 3.2.1
+v lubridate 1.9.3 v tidyr 1.3.0
+v purrr 1.0.2
+-- Conflicts ------------------------------------------ tidyverse_conflicts() --
+x dplyr::filter() masks stats::filter()
+x dplyr::lag() masks stats::lag()
+i Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors
+
+
# better video device, more accurate and faster rendering, esp. on macos. Also should enable system fonts for display
+library(ragg) |>suppressPackageStartupMessages()
+
+setwd("/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion")
+here::i_am("chapter_3.qmd")
here() starts at /Users/kidwellj/gits/hacking_religion_textbook/hacking_religion
-
# Download administrative boundaries for whole UK at country level
-if (file.exists(here("data", "infuse_uk_2011_clipped.shp")) ==FALSE) {
-download.file("https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_uk_2011_clipped.zip", destfile ="data/infuse_uk_2011_clipped.zip")
-unzip("data/infuse_uk_2011_clipped.zip", exdir ="data")
-}
-uk_countries <-st_read(here("data", "infuse_uk_2011_clipped.shp"))
-
-
Reading layer `infuse_uk_2011_clipped' from data source
- `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/infuse_uk_2011_clipped.shp'
- using driver `ESRI Shapefile'
-Simple feature collection with 1 feature and 3 fields
-Geometry type: MULTIPOLYGON
-Dimension: XY
-Bounding box: xmin: -69.1254 ymin: 5337.9 xmax: 655604.7 ymax: 1220302
-Projected CRS: OSGB36 / British National Grid
-
-
# Download administrative boundaries for whole UK at regions level
-if (file.exists(here("data", "infuse_rgn_2011_clipped.shp")) ==FALSE) {
-download.file("https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_rgn_2011_clipped.zip", destfile ="data/infuse_rgn_2011_clipped.zip")
-unzip("data/infuse_rgn_2011_clipped.zip", exdir ="data")
+
# Download administrative boundaries for whole UK at country level
+if (file.exists(here("data", "infuse_uk_2011_clipped.shp")) ==FALSE) {
+download.file("https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_uk_2011_clipped.zip", destfile ="data/infuse_uk_2011_clipped.zip")
+unzip("data/infuse_uk_2011_clipped.zip", exdir ="data")}
-uk_rgn <-st_read(here("data", "infuse_rgn_2011_clipped.shp"))
-
-
Reading layer `infuse_rgn_2011_clipped' from data source
- `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/infuse_rgn_2011_clipped.shp'
- using driver `ESRI Shapefile'
-Simple feature collection with 9 features and 2 fields
-Geometry type: MULTIPOLYGON
-Dimension: XY
-Bounding box: xmin: 82672 ymin: 5337.9 xmax: 655604.7 ymax: 657534.1
-Projected CRS: OSGB36 / British National Grid
-
-
# Download administrative boundaries for whole UK at local authority level
-if (file.exists(here("data", "infuse_dist_lyr_2011_clipped.shp")) ==FALSE) {
-download.file("https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_dist_lyr_2011_clipped.zip", destfile ="data/infuse_dist_lyr_2011_clipped.zip")
-unzip("data/infuse_dist_lyr_2011_clipped.zip", exdir ="data")
-}
-local_authorities <-st_read(here("data", "infuse_dist_lyr_2011_clipped.shp"))
-
-
Reading layer `infuse_dist_lyr_2011_clipped' from data source
- `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/infuse_dist_lyr_2011_clipped.shp'
- using driver `ESRI Shapefile'
-Simple feature collection with 404 features and 3 fields
-Geometry type: MULTIPOLYGON
-Dimension: XY
-Bounding box: xmin: -69.1254 ymin: 5337.9 xmax: 655604.7 ymax: 1220302
-Projected CRS: OSGB36 / British National Grid
-
-
# Download building outlines for whole UK
-if (file.exists(here("data", "infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg")) ==FALSE) {
-download.file("https://zenodo.org/record/6395804/files/infuse_dist_lyr_2011_simplified_100m_buildings_overlay_simplified.gpkg", destfile =here("data", "infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg"))}
-local_authorities_buildings_clip <-st_read(here("data", "infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg"))
-
-
Reading layer `infuse_dist_lyr_2011_simplified_100_buildings_overlay_simplified' from data source `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg'
- using driver `GPKG'
-Simple feature collection with 403 features and 0 fields
-Geometry type: MULTIPOLYGON
-Dimension: XY
-Bounding box: xmin: -69.1254 ymin: 5524.797 xmax: 655986.4 ymax: 1219597
-Projected CRS: OSGB36 / British National Grid
-
+uk_countries <-st_read(here("data", "infuse_uk_2011_clipped.shp"), quiet =TRUE)
+
+# Download administrative boundaries for whole UK at regions level
+if (file.exists(here("data", "infuse_rgn_2011_clipped.shp")) ==FALSE) {
+download.file("https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_rgn_2011_clipped.zip", destfile ="data/infuse_rgn_2011_clipped.zip")
+unzip("data/infuse_rgn_2011_clipped.zip", exdir ="data")
+}
+uk_rgn <-st_read(here("data", "infuse_rgn_2011_clipped.shp"), quiet =TRUE)
+
+# Download administrative boundaries for whole UK at local authority level
+if (file.exists(here("data", "infuse_dist_lyr_2011_clipped.shp")) ==FALSE) {
+download.file("https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_dist_lyr_2011_clipped.zip", destfile ="data/infuse_dist_lyr_2011_clipped.zip")
+unzip("data/infuse_dist_lyr_2011_clipped.zip", exdir ="data")
+}
+local_authorities <-st_read(here("data", "infuse_dist_lyr_2011_clipped.shp"), quiet =TRUE)
+
+# Download building outlines for whole UK
+if (file.exists(here("data", "infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg")) ==FALSE) {
+download.file("https://zenodo.org/record/6395804/files/infuse_dist_lyr_2011_simplified_100m_buildings_overlay_simplified.gpkg", destfile =here("data", "infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg"))}
+local_authorities_buildings_clip <-st_read(here("data", "infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg"), quiet =TRUE)
Before we move on, let’s plot a simple map and have a look at one of our administrative layers. We can use ggplot with a new type of shape geom_sf() to plot the contents of a geospatial data file with polygons which is loaded as a simplefeature in R.
# Note: for more advanced reproducible scripts which demonstrate how these data surces have been
-# obtained, see the companion cookbook here: https://github.com/kidwellj/hacking_religion_cookbook/blob/main/ordnance_survey.R
-
-os_openmap_pow <-st_read(here("data", "os_openmap_pow.gpkg"))
+
# Note: for more advanced reproducible scripts which demonstrate how these data surces have been
+# obtained, see the companion cookbook here: https://github.com/kidwellj/hacking_religion_cookbook/blob/main/ordnance_survey.R
+
+os_openmap_pow <-st_read(here("data", "os_openmap_pow.gpkg"), quiet =TRUE)
+
+bench_time(ggplot(os_openmap_pow) +geom_sf())
-
Reading layer `os_openmap_pow' from data source
- `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/os_openmap_pow.gpkg'
- using driver `GPKG'
-Simple feature collection with 48759 features and 5 fields
-Geometry type: POLYGON
-Dimension: XY
-Bounding box: xmin: 64594.12 ymin: 8287.54 xmax: 655238.1 ymax: 1214662
-Projected CRS: OSGB36 / British National Grid
-
-
ggplot(os_openmap_pow) +
-geom_sf()
-
-
+
process real
+ 1.17ms 1.15ms
It’s worth noting that the way that you load geospatial data in R has changed quite dramatically since 2020 with the introduction of the simplefeature class in R. Much of the documentation you will come across “out there” will make reference to a set of functions which are now deprecated.
Let’s use that data we’ve just loaded to make our first map:
-
# Generate choropleth map of respondent locations
-# using temporary palette here so that 0s are white
-library(tmap) |>suppressPackageStartupMessages()
-# palette <- c(white, "#a8ddb5", "#43a2ca")
-map1 <-tm_shape(local_authorities) +
-# tm_fill(col = "surveys_count", , palette = palette, title = "Concentration of survey respondents") +
-tm_borders(alpha=.5, lwd=0.1) +
-# for intermediate polygon geometries
-# tm_shape(local_authorities) +
-# tm_borders(lwd=0.6) +
-# for dots from original dataset
-# tm_dots("red", size = .05, alpha = .4) +
-tm_scale_bar(position =c("right", "bottom")) +
-tm_style("gray") +
-tm_credits("Data: UK Data Service (OGL)\n& Jeremy H. Kidwell,\nGraphic is CC-by-SA 4.0",
-size =0.4,
-position =c("left", "bottom"),
-just =c("left", "bottom"),
-align ="left") +
-tm_layout(asp =NA,
-frame =FALSE,
-title ="Figure 1a",
-title.size = .7,
-legend.title.size = .7,
-inner.margins =c(0.1, 0.1, 0.05, 0.05)
- )
-
-map1
+
# Generate choropleth map of respondent locations
+# using temporary palette here so that 0s are white
+library(tmap) |>suppressPackageStartupMessages()
+# palette <- c(white, "#a8ddb5", "#43a2ca")
+
+map1 <-tm_shape(local_authorities) +
+# tm_fill(col = "surveys_count", , palette = palette, title = "Concentration of survey respondents") +
+tm_borders(alpha=.5, lwd=0.1) +
+# for intermediate polygon geometries
+# tm_shape(local_authorities) +
+# tm_borders(lwd=0.6) +
+# for dots from original dataset
+# tm_dots("red", size = .05, alpha = .4) +
+tm_scale_bar(position =c("right", "bottom")) +
+tm_style("gray") +
+tm_credits("Data: UK Data Service (OGL)\n& Jeremy H. Kidwell,\nGraphic is CC-by-SA 4.0",
+size =0.4,
+position =c("left", "bottom"),
+just =c("left", "bottom"),
+align ="left") +
+tm_layout(asp =NA,
+frame =FALSE,
+title ="Figure 1a",
+title.size = .7,
+legend.title.size = .7,
+inner.margins =c(0.1, 0.1, 0.05, 0.05)
+ )
+
+map1
-
# save image
-tmap_save(map1, here("figures", "map.png"), width=1920, height=1080, asp=0)
+
# save image
+tmap_save(map1, here("figures", "map.png"), width=1920, height=1080, asp=0)
Map saved to /Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/figures/map.png
@@ -396,35 +368,29 @@ Projected CRS: OSGB36 / British National Grid
-
# subsetting ordnance survey openmap data for measuring clusters and proximity
-
-os_openmap_important_buildings <-st_read(here("data", "os_openmap_important_buildings.gpkg"))
-
-
Reading layer `important_buildings' from data source
- `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/os_openmap_important_buildings.gpkg'
- using driver `GPKG'
-Simple feature collection with 229800 features and 5 fields
-Geometry type: POLYGON
-Dimension: XY
-Bounding box: xmin: 64594.12 ymin: 8125.44 xmax: 655500.5 ymax: 1214662
-Projected CRS: OSGB36 / British National Grid
# subsetting ordnance survey openmap data for measuring clusters and proximity
+
+os_openmap_important_buildings <-st_read(here("data", "os_openmap_important_buildings.gpkg"), quiet =TRUE)
+
+# add pubs, check_cashing, pawnbrokers, SSSI
+## subsets
+
+
Count the number of churches in Local Authorities
+
-
# OSM data
-
-# Note: for more advanced reproducible scripts which demonstrate how these data surces have been
-# obtained, see the companion cookbook here: https://github.com/kidwellj/hacking_religion_cookbook/blob/main/ordnance_survey.R
-
-
-# osm_uk_points <- st_read(system.file(here("data", "pow_osm.gpkg", package = "spData")))
-# vector_filepath = system.file("data/osm-gb-2018Aug29_pow_osm.pbf", package = "sf")
-# osm_uk_points = st_read(vector_filepath)
+
# OSM data
+
+# Note: for more advanced reproducible scripts which demonstrate how these data surces have been
+# obtained, see the companion cookbook here: https://github.com/kidwellj/hacking_religion_cookbook/blob/main/ordnance_survey.R
+
+
+# osm_uk_points <- st_read(system.file(here("data", "pow_osm.gpkg", package = "spData")))
+# vector_filepath = system.file("data/osm-gb-2018Aug29_pow_osm.pbf", package = "sf")
+# osm_uk_points = st_read(vector_filepath)
Guides to geographies: https://rconsortium.github.io/censusguide/ https://ocsi.uk/2019/03/18/lsoas-leps-and-lookups-a-beginners-guide-to-statistical-geographies/
-
Extact places of worship from Ordnance survey open data set Calculate proximity to pubs
+
Calculate proximity to pubs
References
diff --git a/docs/search.json b/docs/search.json
index 5c80665..5f9b515 100644
--- a/docs/search.json
+++ b/docs/search.json
@@ -102,14 +102,14 @@
"href": "chapter_1.html#multifactor-visualisation",
"title": "2 The 2021 UK Census",
"section": "2.7 Multifactor Visualisation",
- "text": "2.7 Multifactor Visualisation\nOne element of R data analysis that can get really interesting is working with multiple variables. Above we’ve looked at the breakdown of religious affiliation across the whole of England and Wales (Scotland operates an independent census), and by placing this data alongside a specific region, we’ve already made a basic entry into working with multiple variables but this can get much more interesting. Adding an additional quantative variable (also known as bivariate data) into the mix, however can also generate a lot more information and we have to think about visualising it in different ways which can still communicate with visual clarity in spite of the additional visual noise which is inevitable with enhanced complexity. Let’s have a look at the way that religion in England and Wales breaks down by ethnicity.\n\n\n\n\n\n\nWhat is Nomis?\n\n\n\nFor the UK, census data is made available for programmatic research like this via an organisation called NOMIS. Luckily for us, there is an R library you can use to access nomis directly which greatly simplifies the process of pulling data down from the platform. It’s worth noting that if you’re not in the UK, there are similar options for other countries. Nearly every R textbook I’ve ever seen works with USA census data, so you’ll find plenty of documentation available on the tools you can use for US Census data. 
Similarly for the EU, Canada, Austrailia etc.\nIf you want to draw some data from the nomis platform yourself in R, have a look at the companion cookbook repository.\n\n\n\n# Get table of Census 2011 religion data from nomis\n# Note: for reproducible code used to generate the dataset used in the book, see the cookbook here: \nz <- readRDS(file = (here(\"example_data\", \"z.rds\")))\n\n# Filter down to simplified dataset with England / Wales and percentages without totals\nuk_census_2011_religion <- filter(z, GEOGRAPHY_NAME==\"England and Wales\" & RURAL_URBAN_NAME==\"Total\" & C_RELPUK11_NAME != \"All categories: Religion\")\n# Drop unnecessary columns\nuk_census_2011_religion <- select(uk_census_2011_religion, C_RELPUK11_NAME, OBS_VALUE)\n# Plot results\nplot1 <- ggplot(uk_census_2011_religion, aes(x = C_RELPUK11_NAME, y = OBS_VALUE)) + geom_bar(stat = \"identity\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n# ggsave(filename = \"plot.png\", plot = plot1)\n\n# grab daata from nomis for 2001 census religion / ethnicity\nz0 <- readRDS(file = (here(\"example_data\", \"z0.rds\")))\n\n# select relevant columns\nuk_census_2001_religion_ethnicity <- select(z0, GEOGRAPHY_NAME, C_RELPUK11_NAME, C_ETHHUK11_NAME, OBS_VALUE)\n# Filter down to simplified dataset with England / Wales and percentages without totals\nuk_census_2001_religion_ethnicity <- filter(uk_census_2001_religion_ethnicity, GEOGRAPHY_NAME==\"England and Wales\" & C_RELPUK11_NAME != \"All categories: Religion\")\n# Simplify data to only include general totals and omit subcategories\nuk_census_2001_religion_ethnicity <- uk_census_2001_religion_ethnicity %>% filter(grepl('Total', C_ETHHUK11_NAME))\n\n# grab data from nomis for 2011 census religion / ethnicity table\nz1 <- readRDS(file = (here(\"example_data\", \"z1.rds\")))\n\n# select relevant columns\nuk_census_2011_religion_ethnicity <- select(z1, GEOGRAPHY_NAME, C_RELPUK11_NAME, C_ETHPUK11_NAME, OBS_VALUE)\n# Filter down to 
simplified dataset with England / Wales and percentages without totals\nuk_census_2011_religion_ethnicity <- filter(uk_census_2011_religion_ethnicity, GEOGRAPHY_NAME==\"England and Wales\" & C_RELPUK11_NAME != \"All categories: Religion\" & C_ETHPUK11_NAME != \"All categories: Ethnic group\")\n# Simplify data to only include general totals and omit subcategories\nuk_census_2011_religion_ethnicity <- uk_census_2011_religion_ethnicity %>% filter(grepl('Total', C_ETHPUK11_NAME))\n\n# grab data from nomis for 2021 census religion / ethnicity table\nz2 <- readRDS(file = (here(\"example_data\", \"z2.rds\")))\n\n# select relevant columns\nuk_census_2021_religion_ethnicity <- select(z2, GEOGRAPHY_NAME, C2021_RELIGION_10_NAME, C2021_ETH_8_NAME, OBS_VALUE)\n# Filter down to simplified dataset with England / Wales and percentages without totals\nuk_census_2021_religion_ethnicity <- filter(uk_census_2021_religion_ethnicity, GEOGRAPHY_NAME==\"England and Wales\" & C2021_RELIGION_10_NAME != \"Total\" & C2021_ETH_8_NAME != \"Total\")\n# 2021 census includes white sub-groups so we need to omit those so we just have totals:\nuk_census_2021_religion_ethnicity <- filter(uk_census_2021_religion_ethnicity, C2021_ETH_8_NAME != \"White: English, Welsh, Scottish, Northern Irish or British\" & C2021_ETH_8_NAME != \"White: Irish\" & C2021_ETH_8_NAME != \"White: Gypsy or Irish Traveller, Roma or Other White\")\n\nggplot(uk_census_2011_religion_ethnicity, aes(fill=C_ETHPUK11_NAME, x=C_RELPUK11_NAME, y=OBS_VALUE)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + scale_fill_brewer(palette = \"Set1\") + ggtitle(\"Religious Affiliation in the 2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n\nThe trouble with using grouped bars here, as you can see, is that there are quite sharp disparities which make it hard to compare in meaningful ways. 
We could use logarithmic rather than linear scaling as an option, but this is hard for many general public audiences to apprecaite without guidance. One alternative quick fix is to extract data from “white” respondents which can then be placed in a separate chart with a different scale.\n\n# Filter down to simplified dataset with England / Wales and percentages without totals\nuk_census_2011_religion_ethnicity_white <- filter(uk_census_2011_religion_ethnicity, C_ETHPUK11_NAME == \"White: Total\")\nuk_census_2011_religion_ethnicity_nonwhite <- filter(uk_census_2011_religion_ethnicity, C_ETHPUK11_NAME != \"White: Total\")\n\nggplot(uk_census_2011_religion_ethnicity_nonwhite, aes(fill=C_ETHPUK11_NAME, x=C_RELPUK11_NAME, y=OBS_VALUE)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + scale_fill_brewer(palette = \"Set1\") + ggtitle(\"Religious Affiliation in the 2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n\nThis still doesn’t quite render with as much visual clarity and communication as I’d like. For a better look, we can use a technique in R called “faceting” to create a series of small charts which can be viewed alongside one another.\n\nggplot(uk_census_2011_religion_ethnicity_nonwhite, aes(x=C_RELPUK11_NAME, y=OBS_VALUE)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + facet_wrap(~C_ETHPUK11_NAME, ncol = 2) + scale_fill_brewer(palette = \"Set1\") + ggtitle(\"Religious Affiliation in the 2011 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n\nFor our finale chart, I’d like to take the faceted chart we’ve just done, and add in totals for the previous two census years (2001 and 2011) so we can see how trends are changing in terms of religious affiliation within ethnic self-identification categories. 
We’ll draw on some techniques we’re already developed above using rbind() to connect up each of these charts (after we’ve added a column identifying each chart by the census year). We will also need to use one new technique to change the wording of ethnic categories as this isn’t consistent from one census to the next and ggplot will struggle to chart things if the terms being used are exactly the same. We’ll use mutate() again to accomplish this with some slightly different code.\n\n# First add column to each dataframe so we don't lose track of the census it comes from:\nuk_census_2001_religion_ethnicity$dataset <- c(\"2001\")\nuk_census_2011_religion_ethnicity$dataset <- c(\"2011\")\nuk_census_2021_religion_ethnicity$dataset <- c(\"2021\")\n\n# Let's tidy the names of each column:\n\nnames(uk_census_2001_religion_ethnicity) <- c(\"Geography\", \"Religion\", \"Ethnicity\", \"Value\", \"Year\")\nnames(uk_census_2011_religion_ethnicity) <- c(\"Geography\", \"Religion\", \"Ethnicity\", \"Value\", \"Year\")\nnames(uk_census_2021_religion_ethnicity) <- c(\"Geography\", \"Religion\", \"Ethnicity\", \"Value\", \"Year\")\n\n# Next we need to change the terms using mutate()\nuk_census_2001_religion_ethnicity <- uk_census_2001_religion_ethnicity %>% \n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^White: Total$\", replacement = \"White\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Mixed: Total$\", replacement = \"Mixed\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Asian: Total$\", replacement = \"Asian\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Black or Black British: Total$\", replacement = \"Black\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Chinese or Other ethnic group: Total$\", replacement = \"Other\"))\n \nuk_census_2011_religion_ethnicity <- uk_census_2011_religion_ethnicity %>% \n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = 
\"^White: Total$\", replacement = \"White\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Mixed/multiple ethnic group: Total$\", replacement = \"Mixed\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Asian/Asian British: Total$\", replacement = \"Asian\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Black/African/Caribbean/Black British: Total$\", replacement = \"Black\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Other ethnic group: Total$\", replacement = \"Other\"))\n\nuk_census_2021_religion_ethnicity <- uk_census_2021_religion_ethnicity %>% \n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^White: Total$\", replacement = \"White\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Mixed or Multiple ethnic groups$\", replacement = \"Mixed\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Asian, Asian British or Asian Welsh$\", replacement = \"Asian\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Black, Black British, Black Welsh, Caribbean or African$\", replacement = \"Black\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Other ethnic group$\", replacement = \"Other\"))\n\n# Now let's merge the tables:\n\nuk_census_merged_religion_ethnicity <- rbind(uk_census_2021_religion_ethnicity, uk_census_2011_religion_ethnicity)\n\nuk_census_merged_religion_ethnicity <- rbind(uk_census_merged_religion_ethnicity, uk_census_2001_religion_ethnicity)\n\n# As above, we'll split out non-white and white:\n\nuk_census_merged_religion_ethnicity_nonwhite <- filter(uk_census_merged_religion_ethnicity, Ethnicity != \"White\")\n\n# Time to plot!\n\nggplot(uk_census_merged_religion_ethnicity_nonwhite, aes(fill=Year, x=Religion, y=Value)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + facet_wrap(~Ethnicity, ncol = 2) + scale_fill_brewer(palette = \"Set1\") + 
ggtitle(\"Religious Affiliation in the 2001-2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n\nThere are a few formatting issues which remain. Our y-axis number labels are in scientific format which isn’t really very easy to read. You can use the very powerful and flexible scales() library to bring in some more readable formatting of numbers in a variety of places in R including in ggplot visualizations.\n\nlibrary(scales) |> suppressPackageStartupMessages()\nggplot(uk_census_merged_religion_ethnicity_nonwhite, aes(fill=Year, x=Religion, y=Value)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + facet_wrap(~Ethnicity, ncol = 2) + scale_fill_brewer(palette = \"Set1\") + scale_y_continuous(labels = unit_format(unit = \"M\", scale = 1e-6), breaks = breaks_extended(8)) + ggtitle(\"Religious Affiliation in the 2001-2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n# https://ggplot2-book.org/scales-position#sec-position-continuous-breaks\n\nThis chart shows an increase in almost every category, though it’s a bit hard to read in some cases. However, this information is based on the increase in raw numbers. It’s possbile that numbers may be going up, but in some cases the percentage share for a particular category has actually gone down. 
Let’s transform and visualise our data as percentages to see what kind of trends we can actually isolate:\n\nuk_census_merged_religion_ethnicity <- uk_census_merged_religion_ethnicity %>%\n group_by(Ethnicity, Year) %>%\n dplyr::mutate(Percent = Value/sum(Value))\n\nggplot(uk_census_merged_religion_ethnicity, aes(fill=Year, x=Religion, y=Percent)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + facet_wrap(~Ethnicity, scales=\"free_x\") + scale_fill_brewer(palette = \"Set1\") + scale_y_continuous(labels = scales::percent) + ggtitle(\"Religious Affiliation in the 2001-2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n\nNow you can see why this shift is important - the visualisation tells a completely different story in some cases across the two different charts. In the first, working off raw numbers we see a net increase in Christianity across all categories. But if we take into account the fact that the overall share of population is growing for each of these groups, their actual composition is changing in a different direction. The proportion of each group is declining across the three census periods (albeit with an exception for the “Other” category from 2011 to 2021).\nTo highlight a few features of this final plot, I’ve used a specific feature within facet_wrap scales = \"free_x\" to let each of the individual facets adjust the total range on the x-axis. Since we’re looking at trends here and not absolute values, having correspondence across scales isn’t important and this makes for something a bit more visually tidy. 
I’ve also shifted the code for scale_y_continuous to render values as percentages (rather than millions).\nIn case you want to print this plot out and hang it on your wall, you can use the ggsave tool to render the chart as an image file:\n\nplot1 <- ggplot(uk_census_merged_religion_ethnicity, aes(fill=Year, x=Religion, y=Percent)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + facet_wrap(~Ethnicity, scales=\"free_x\") + scale_fill_brewer(palette = \"Set1\") + scale_y_continuous(labels = scales::percent) + ggtitle(\"Religious Affiliation in the 2001-2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\nggsave(\"chart.png\", plot=plot1, width = 8, height = 10, units=c(\"in\"))"
+ "text": "2.7 Multifactor Visualisation\nOne element of R data analysis that can get really interesting is working with multiple variables. Above we’ve looked at the breakdown of religious affiliation across the whole of England and Wales (Scotland operates an independent census), and by placing this data alongside a specific region, we’ve already made a basic entry into working with multiple variables but this can get much more interesting. Adding an additional quantative variable (also known as bivariate data) into the mix, however can also generate a lot more information and we have to think about visualising it in different ways which can still communicate with visual clarity in spite of the additional visual noise which is inevitable with enhanced complexity. Let’s have a look at the way that religion in England and Wales breaks down by ethnicity.\n\n\n\n\n\n\nWhat is Nomis?\n\n\n\nFor the UK, census data is made available for programmatic research like this via an organisation called NOMIS. Luckily for us, there is an R library you can use to access nomis directly which greatly simplifies the process of pulling data down from the platform. It’s worth noting that if you’re not in the UK, there are similar options for other countries. Nearly every R textbook I’ve ever seen works with USA census data, so you’ll find plenty of documentation available on the tools you can use for US Census data. 
Similarly for the EU, Canada, Austrailia etc.\nIf you want to draw some data from the nomis platform yourself in R, have a look at the companion cookbook repository.\n\n\n\n# Get table of Census 2011 religion data from nomis\nz <- readRDS(file = (here(\"example_data\", \"z.rds\")))\n\n# Filter down to simplified dataset with England / Wales and percentages without totals\nuk_census_2011_religion <- filter(z, GEOGRAPHY_NAME==\"England and Wales\" & RURAL_URBAN_NAME==\"Total\" & C_RELPUK11_NAME != \"All categories: Religion\")\n# Drop unnecessary columns\nuk_census_2011_religion <- select(uk_census_2011_religion, C_RELPUK11_NAME, OBS_VALUE)\n# Plot results\nplot1 <- ggplot(uk_census_2011_religion, aes(x = C_RELPUK11_NAME, y = OBS_VALUE)) + geom_bar(stat = \"identity\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n# ggsave(filename = \"plot.png\", plot = plot1)\n\n# grab daata from nomis for 2001 census religion / ethnicity\nz0 <- readRDS(file = (here(\"example_data\", \"z0.rds\")))\n\n# select relevant columns\nuk_census_2001_religion_ethnicity <- select(z0, GEOGRAPHY_NAME, C_RELPUK11_NAME, C_ETHHUK11_NAME, OBS_VALUE)\n# Filter down to simplified dataset with England / Wales and percentages without totals\nuk_census_2001_religion_ethnicity <- filter(uk_census_2001_religion_ethnicity, GEOGRAPHY_NAME==\"England and Wales\" & C_RELPUK11_NAME != \"All categories: Religion\")\n# Simplify data to only include general totals and omit subcategories\nuk_census_2001_religion_ethnicity <- uk_census_2001_religion_ethnicity %>% filter(grepl('Total', C_ETHHUK11_NAME))\n\n# grab data from nomis for 2011 census religion / ethnicity table\nz1 <- readRDS(file = (here(\"example_data\", \"z1.rds\")))\n\n# select relevant columns\nuk_census_2011_religion_ethnicity <- select(z1, GEOGRAPHY_NAME, C_RELPUK11_NAME, C_ETHPUK11_NAME, OBS_VALUE)\n# Filter down to simplified dataset with England / Wales and percentages without totals\nuk_census_2011_religion_ethnicity <- 
filter(uk_census_2011_religion_ethnicity, GEOGRAPHY_NAME==\"England and Wales\" & C_RELPUK11_NAME != \"All categories: Religion\" & C_ETHPUK11_NAME != \"All categories: Ethnic group\")\n# Simplify data to only include general totals and omit subcategories\nuk_census_2011_religion_ethnicity <- uk_census_2011_religion_ethnicity %>% filter(grepl('Total', C_ETHPUK11_NAME))\n\n# grab data from nomis for 2021 census religion / ethnicity table\nz2 <- readRDS(file = (here(\"example_data\", \"z2.rds\")))\n\n# select relevant columns\nuk_census_2021_religion_ethnicity <- select(z2, GEOGRAPHY_NAME, C2021_RELIGION_10_NAME, C2021_ETH_8_NAME, OBS_VALUE)\n# Filter down to simplified dataset with England / Wales and percentages without totals\nuk_census_2021_religion_ethnicity <- filter(uk_census_2021_religion_ethnicity, GEOGRAPHY_NAME==\"England and Wales\" & C2021_RELIGION_10_NAME != \"Total\" & C2021_ETH_8_NAME != \"Total\")\n# 2021 census includes white sub-groups so we need to omit those so we just have totals:\nuk_census_2021_religion_ethnicity <- filter(uk_census_2021_religion_ethnicity, C2021_ETH_8_NAME != \"White: English, Welsh, Scottish, Northern Irish or British\" & C2021_ETH_8_NAME != \"White: Irish\" & C2021_ETH_8_NAME != \"White: Gypsy or Irish Traveller, Roma or Other White\")\n\nggplot(uk_census_2011_religion_ethnicity, aes(fill=C_ETHPUK11_NAME, x=C_RELPUK11_NAME, y=OBS_VALUE)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + scale_fill_brewer(palette = \"Set1\") + ggtitle(\"Religious Affiliation in the 2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n\nThe trouble with using grouped bars here, as you can see, is that there are quite sharp disparities which make it hard to compare in meaningful ways. We could use logarithmic rather than linear scaling as an option, but this is hard for many general public audiences to apprecaite without guidance. 
One alternative quick fix is to extract data from “white” respondents which can then be placed in a separate chart with a different scale.\n\n# Filter down to simplified dataset with England / Wales and percentages without totals\nuk_census_2011_religion_ethnicity_white <- filter(uk_census_2011_religion_ethnicity, C_ETHPUK11_NAME == \"White: Total\")\nuk_census_2011_religion_ethnicity_nonwhite <- filter(uk_census_2011_religion_ethnicity, C_ETHPUK11_NAME != \"White: Total\")\n\nggplot(uk_census_2011_religion_ethnicity_nonwhite, aes(fill=C_ETHPUK11_NAME, x=C_RELPUK11_NAME, y=OBS_VALUE)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + scale_fill_brewer(palette = \"Set1\") + ggtitle(\"Religious Affiliation in the 2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n\nThis still doesn’t quite render with as much visual clarity and communication as I’d like. For a better look, we can use a technique in R called “faceting” to create a series of small charts which can be viewed alongside one another.\n\nggplot(uk_census_2011_religion_ethnicity_nonwhite, aes(x=C_RELPUK11_NAME, y=OBS_VALUE)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + facet_wrap(~C_ETHPUK11_NAME, ncol = 2) + scale_fill_brewer(palette = \"Set1\") + ggtitle(\"Religious Affiliation in the 2011 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n\nFor our finale chart, I’d like to take the faceted chart we’ve just done, and add in totals for the previous two census years (2001 and 2011) so we can see how trends are changing in terms of religious affiliation within ethnic self-identification categories. We’ll draw on some techniques we’re already developed above using rbind() to connect up each of these charts (after we’ve added a column identifying each chart by the census year). 
We will also need to use one new technique to change the wording of ethnic categories as this isn’t consistent from one census to the next and ggplot will struggle to chart things if the terms being used are exactly the same. We’ll use mutate() again to accomplish this with some slightly different code.\n\n# First add column to each dataframe so we don't lose track of the census it comes from:\nuk_census_2001_religion_ethnicity$dataset <- c(\"2001\")\nuk_census_2011_religion_ethnicity$dataset <- c(\"2011\")\nuk_census_2021_religion_ethnicity$dataset <- c(\"2021\")\n\n# Let's tidy the names of each column:\n\nnames(uk_census_2001_religion_ethnicity) <- c(\"Geography\", \"Religion\", \"Ethnicity\", \"Value\", \"Year\")\nnames(uk_census_2011_religion_ethnicity) <- c(\"Geography\", \"Religion\", \"Ethnicity\", \"Value\", \"Year\")\nnames(uk_census_2021_religion_ethnicity) <- c(\"Geography\", \"Religion\", \"Ethnicity\", \"Value\", \"Year\")\n\n# Next we need to change the terms using mutate()\nuk_census_2001_religion_ethnicity <- uk_census_2001_religion_ethnicity %>% \n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^White: Total$\", replacement = \"White\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Mixed: Total$\", replacement = \"Mixed\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Asian: Total$\", replacement = \"Asian\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Black or Black British: Total$\", replacement = \"Black\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Chinese or Other ethnic group: Total$\", replacement = \"Other\"))\n \nuk_census_2011_religion_ethnicity <- uk_census_2011_religion_ethnicity %>% \n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^White: Total$\", replacement = \"White\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Mixed/multiple ethnic group: Total$\", replacement = \"Mixed\")) %>%\n 
mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Asian/Asian British: Total$\", replacement = \"Asian\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Black/African/Caribbean/Black British: Total$\", replacement = \"Black\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Other ethnic group: Total$\", replacement = \"Other\"))\n\nuk_census_2021_religion_ethnicity <- uk_census_2021_religion_ethnicity %>% \n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^White: Total$\", replacement = \"White\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Mixed or Multiple ethnic groups$\", replacement = \"Mixed\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Asian, Asian British or Asian Welsh$\", replacement = \"Asian\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Black, Black British, Black Welsh, Caribbean or African$\", replacement = \"Black\")) %>%\n mutate(Ethnicity = str_replace_all(Ethnicity, \n pattern = \"^Other ethnic group$\", replacement = \"Other\"))\n\n# Now let's merge the tables:\n\nuk_census_merged_religion_ethnicity <- rbind(uk_census_2021_religion_ethnicity, uk_census_2011_religion_ethnicity)\n\nuk_census_merged_religion_ethnicity <- rbind(uk_census_merged_religion_ethnicity, uk_census_2001_religion_ethnicity)\n\n# As above, we'll split out non-white and white:\n\nuk_census_merged_religion_ethnicity_nonwhite <- filter(uk_census_merged_religion_ethnicity, Ethnicity != \"White\")\n\n# Time to plot!\n\nggplot(uk_census_merged_religion_ethnicity_nonwhite, aes(fill=Year, x=Religion, y=Value)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + facet_wrap(~Ethnicity, ncol = 2) + scale_fill_brewer(palette = \"Set1\") + ggtitle(\"Religious Affiliation in the 2001-2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n\nThere 
are a few formatting issues which remain. Our y-axis number labels are in scientific format which isn’t really very easy to read. You can use the very powerful and flexible scales() library to bring in some more readable formatting of numbers in a variety of places in R including in ggplot visualizations.\n\nlibrary(scales) |> suppressPackageStartupMessages()\nggplot(uk_census_merged_religion_ethnicity_nonwhite, aes(fill=Year, x=Religion, y=Value)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + facet_wrap(~Ethnicity, ncol = 2) + scale_fill_brewer(palette = \"Set1\") + scale_y_continuous(labels = unit_format(unit = \"M\", scale = 1e-6), breaks = breaks_extended(8)) + ggtitle(\"Religious Affiliation in the 2001-2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n# https://ggplot2-book.org/scales-position#sec-position-continuous-breaks\n\nThis chart shows an increase in almost every category, though it’s a bit hard to read in some cases. However, this information is based on the increase in raw numbers. It’s possbile that numbers may be going up, but in some cases the percentage share for a particular category has actually gone down. 
Let’s transform and visualise our data as percentages to see what kind of trends we can actually isolate:\n\nuk_census_merged_religion_ethnicity <- uk_census_merged_religion_ethnicity %>%\n group_by(Ethnicity, Year) %>%\n dplyr::mutate(Percent = Value/sum(Value))\n\nggplot(uk_census_merged_religion_ethnicity, aes(fill=Year, x=Religion, y=Percent)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + facet_wrap(~Ethnicity, scales=\"free_x\") + scale_fill_brewer(palette = \"Set1\") + scale_y_continuous(labels = scales::percent) + ggtitle(\"Religious Affiliation in the 2001-2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\n\n\n\nNow you can see why this shift is important - the visualisation tells a completely different story in some cases across the two different charts. In the first, working off raw numbers we see a net increase in Christianity across all categories. But if we take into account the fact that the overall share of population is growing for each of these groups, their actual composition is changing in a different direction. The proportion of each group is declining across the three census periods (albeit with an exception for the “Other” category from 2011 to 2021).\nTo highlight a few features of this final plot, I’ve used a specific feature within facet_wrap scales = \"free_x\" to let each of the individual facets adjust the total range on the x-axis. Since we’re looking at trends here and not absolute values, having correspondence across scales isn’t important and this makes for something a bit more visually tidy. 
I’ve also shifted the code for scale_y_continuous to render values as percentages (rather than millions).\nIn case you want to print this plot out and hang it on your wall, you can use the ggsave tool to render the chart as an image file:\n\nplot1 <- ggplot(uk_census_merged_religion_ethnicity, aes(fill=Year, x=Religion, y=Percent)) + geom_bar(position=\"dodge\", stat =\"identity\", colour = \"black\") + facet_wrap(~Ethnicity, scales=\"free_x\") + scale_fill_brewer(palette = \"Set1\") + scale_y_continuous(labels = scales::percent) + ggtitle(\"Religious Affiliation in the 2001-2021 Census of England and Wales\") + xlab(\"\") + ylab(\"\") + theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))\n\nggsave(\"chart.png\", plot=plot1, width = 8, height = 10, units=c(\"in\"))"
},
{
"objectID": "chapter_2.html",
"href": "chapter_2.html",
"title": "3 Survey Data: Spotlight Project",
"section": "",
- "text": "4 Loading in some data\n# R Setup -----------------------------------------------------------------\nsetwd(\"/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion\")\nlibrary(here)\n\nhere() starts at /Users/kidwellj/gits/hacking_religion_textbook\n\nlibrary(tidyverse)\n\n-- Attaching core tidyverse packages ------------------------ tidyverse 2.0.0 --\nv dplyr 1.1.3 v readr 2.1.4\nv forcats 1.0.0 v stringr 1.5.0\nv ggplot2 3.4.3 v tibble 3.2.1\nv lubridate 1.9.3 v tidyr 1.3.0\nv purrr 1.0.2 \n\n\n-- Conflicts ------------------------------------------ tidyverse_conflicts() --\nx dplyr::filter() masks stats::filter()\nx dplyr::lag() masks stats::lag()\ni Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors\n\nlibrary(haven) # used for importing SPSS .sav files\nhere::i_am(\"chapter_2.qmd\")\n\nhere() starts at /Users/kidwellj/gits/hacking_religion_textbook/hacking_religion\n\nclimate_experience_data <- read_sav(here(\"example_data\", \"climate_experience_data.sav\"))\nThe first thing to note here is that we’ve drawn in a different type of data file, this time from an .sav file, usully produced by the statistics software package SPSS. This uses a different R Library (I use haven for this). The upside is that in some cases where you have survey data with both a code and a value like “1” is eqivalent to “very much agree” this will preserve both in the R dataframe that is created. Now that you’ve loaded in data, you have a new R dataframe called “climate_experience_data” with a lot of columns with just under 1000 survey responses.\nOne of the challenges we faced when running this study is how to gather responsible data from surveys regarding religious identity. We’ll dive into this in depth as we do analysis and look at some of the agreements and conflicts in terms of respondent attribution. 
Just to set the stage, we used the following kinds of question to ask about religion and spirituality:\nThis is one way of measuring religion, that is, to ask a person if they consider themselves formally affiliated with a particular group. This kind of question has some (serious) limitations, but we’ll get to that in a moment.\nWe also asked respondents (Q57): “Regardless of whether you belong to a particular religion, how religious would you say you are?” and then provided a slider from 0 (not religious at all) to 10 (very religious).\nWe included some classic indicators about how often respondents go to worship (Q58): Apart from weddings, funerals and other special occasions, how often do you attend religious services? and (Q59): “Q59 Apart from when you are at religious services, how often do you pray?”\nEach of these measures a particular kind of dimension, and it is interesting to note that sometimes there are stronger correlations between how often a person attends worship services (weekly versus once a year) and a particular view, than there is between their affiliation (if they are Christian or Pagan). We’ll do some exploratory work shortly to see how this is the case in our sample. We also included a series of questions about spirituality in Q52 and used a nature relatedness scale Q51.\nYou’ll find that many surveys will only use one of these forms of question and ignore the rest. I think this is a really bad idea as religious belonging, identity, and spirituality are far too complex to work off a single form of response. We can also test out how these different attributions relate to other demographic features, like interest in politics, economic attainment, etc.\nLet’s dive into the data and see how this all works out. 
We’ll start with the question 56 data, around religious affiliation:\nreligious_affiliation <- as_tibble(as_factor(climate_experience_data$Q56))\nnames(religious_affiliation) <- c(\"response\")\nreligious_affiliation <- filter(religious_affiliation, !is.na(response))\nThere are few things we need to do here to get the data into initial proper shape. This might be called “cleaning” the data:\nIf we pause at this point to view the data, you’ll see it’s basically just a long list of survey responses. What we need is a count of each unique response (or factor). This will take a few more steps:\nreligious_affiliation_sums <- religious_affiliation %>% \n1 dplyr::count(response, sort = TRUE) %>%\n2 dplyr::mutate(response = forcats::fct_rev(forcats::fct_inorder(response)))\nreligious_affiliation_sums <- religious_affiliation_sums %>% \n3 dplyr::mutate(perc = scales::percent(n / sum(n), accuracy = .1, trim = FALSE))\n\n\n1\n\nFirst we generate new a dataframe with sums per category and\n\n2\n\n…sort in descending order\n\n3\n\nThen we add new column with percentages based on the sums you’ve just generated\nThat should give us a tidy table of results, which you can see if you view the contents of our new religious_affiliation_sums dataframe:\nhead(religious_affiliation_sums)\n\n# A tibble: 6 x 3\n response n perc \n <fct> <int> <chr> \n1 Christian 342 \"33.9%\"\n2 Muslim 271 \"26.9%\"\n3 No religion 108 \"10.7%\"\n4 Hindu 72 \" 7.1%\"\n5 Atheist 54 \" 5.4%\"\n6 Spiritual but not religious 38 \" 3.8%\"\n# make plot\nggplot(religious_affiliation_sums, aes(x = n, y = response)) +\n geom_col(colour = \"white\") + \n ## add percentage labels\n geom_text(aes(label = perc),\n ## make labels left-aligned and white\n hjust = 1, nudge_x = -.5, colour = \"white\", size=3)\nI’ve added one feature to our chart that wasn’t in the bar charts in chapter 1, text labels with the actual value on each bar.\nYou may be thinking about the plots we’ve just finished in chapter 1 and wondering how 
they compare. Let’s use the same facet approach that we’ve just used to render this data in a subsetted way.\n# First we need to add in data on ethnic self-identification from our respondents:\ndf <- select(climate_experience_data, Q56, Q0)\nreligious_affiliation_ethnicity <- as_tibble(as_factor(df))\nnames(religious_affiliation_ethnicity) <- c(\"Religion\", \"Ethnicity\")\n\nreligious_affiliation_ethnicity_sums <- religious_affiliation_ethnicity %>% \n group_by(Ethnicity) %>%\n dplyr::count(Religion, sort = TRUE) %>%\n dplyr::mutate(Religion = forcats::fct_rev(forcats::fct_inorder(Religion)))\n\nplot1 <- ggplot(religious_affiliation_ethnicity_sums, aes(x = n, y = Religion)) +\n geom_col(colour = \"white\") + facet_wrap(~Ethnicity, scales=\"free_x\")\n\nggsave(\"chart.png\", plot=plot1, width = 8, height = 10, units=c(\"in\"))\nUse mutate to put “prefer not to say” at the bottom # Info here: https://r4ds.had.co.nz/factors.html#modifying-factor-levels\ncaption <- “Christian Denomination” # TODO: copy plot above for Q56 to add two additional plots using climate_experience_data_named\\(Q56b and climate_experience_data_named\\)Q56c # Religious Affiliation b - Christian Denomination Subquestion christian_denomination <- qualtrics_process_single_multiple_choice(climate_experience_data_named\\(Q56b) christian_denomination_table <- chart_single_result_flextable(climate_experience_data_named\\)Q56b, desc(Count)) christian_denomination_table save_as_docx(christian_denomination_table, path = “./figures/q56_religious_affiliation_xn_denomination.docx”)\nchristian_denomination_hi <- filter(climate_experience_data_named, Q56 == “Christian”, Q57_bin == “high”) christian_denomination_hi <- qualtrics_process_single_multiple_choice(christian_denomination_hi$Q56b) christian_denomination_hi\ncaption <- “Islamic Identity” # Should the label be different than income since the data examined is the Affiliation? 
# TODO: adjust plot to factor using numbered responses on this question (perhaps also above) religious_affiliationc <- qualtrics_process_single_multiple_choice(climate_experience_data_named\\(Q56c) religious_affiliationc_plot <- plot_horizontal_bar(religious_affiliationc) religious_affiliationc_plot <- religious_affiliationc_plot + labs(caption = caption, x = \"\", y = \"\") religious_affiliationc_plot ggsave(\"figures/q56c_religious_affiliation.png\", width = 20, height = 10, units = \"cm\") religious_affiliationc_table <- chart_single_result_flextable(climate_experience_data_named\\)Q56c, Count) religious_affiliationc_table save_as_docx(religious_affiliationc_table, path = “./figures/q56_religious_affiliation_islam.docx”)\ncaption <- “Respondent Religiosity” religiosity <- qualtrics_process_single_multiple_choice(as.character(climate_experience_data_named\\(Q57_1)) religiosity_plot <- plot_horizontal_bar(religiosity) religiosity_plot <- religiosity_plot + labs(caption = caption, x = \"\", y = \"\") religiosity_plot ggsave(\"figures/q57_religiosity_plot.png\", width = 20, height = 10, units = \"cm\") religiosity_table <- chart_single_result_flextable(climate_experience_data_named\\)Q57_1, desc(Variable)) religiosity_table save_as_docx(religious_affiliationc_table, path = “./figures/q57_religiousity.docx”)\ncaption <- “Respondent Attendance of Religious Services” religious_service_attend <- qualtrics_process_single_multiple_choice(climate_experience_data_named\\(Q58) religious_service_attend_plot <- plot_horizontal_bar(religious_service_attend) religious_service_attend_plot <- religious_service_attend_plot + labs(title = caption, x = \"\", y = \"\") religious_service_attend_plot ggsave(\"figures/q58_religious_service_attend.png\", width = 20, height = 10, units = \"cm\") religious_service_attend_table <- chart_single_result_flextable(climate_experience_data_named\\)Q58, Count) religious_service_attend_table save_as_docx(religious_service_attend_table, path = 
“./figures/q58_religious_service_attend.docx”)\ndf <- select(climate_experience_data, Q52_bin, Q53_bin, Q57_bin, Q58) names(df) <- c(“Q52_bin”, “Q53_bin”, “Q57_bin”, “response”) facet_names <- c(Q52_bin = “Spirituality”, Q53_bin = “Politics L/R”, Q57_bin = “Religiosity”, low=“low”, medium=“medium”, high=“high”) facet_labeller <- function(variable,value){return(facet_names[value])} df\\(response <- factor(df\\)response, ordered = TRUE, levels = c(“1”, “2”, “3”, “4”, “5”)) df\\(response <- fct_recode(df\\)response, “More than once a week” = “1”, “Once a week” = “2”, “At least once a month” = “3”, “Only on special holy days” = “4”, “Never” = “5”) df %>% # we need to get the data including facet info in long format, so we use pivot_longer() pivot_longer(!response, names_to = “bin_name”, values_to = “b”) %>% # add counts for plot below count(response, bin_name, b) %>% group_by(bin_name,b) %>% mutate(perc=paste0(round(n*100/sum(n),1),“%”)) %>% # run ggplot ggplot(aes(x = n, y = ““, fill = response)) + geom_col(position=position_fill(), aes(fill=response)) + geom_text(aes(label = perc), position = position_fill(vjust=.5), size=2) + scale_fill_brewer(palette =”Dark2”, type = “qual”) + scale_x_continuous(labels = scales::percent_format()) + facet_grid(vars(b), vars(bin_name), labeller=as_labeller(facet_names)) + labs(caption = caption, x = ““, y =”“) + guides(fill = guide_legend(title = NULL)) ggsave(”figures/q58_faceted.png”, width = 30, height = 10, units = “cm”)\ncaption <- “Respondent Prayer Outside of Religious Services” prayer <- qualtrics_process_single_multiple_choice(climate_experience_data_named\\(Q59) prayer_plot <- plot_horizontal_bar(prayer) prayer_plot <- prayer_plot + labs(caption = caption, x = \"\", y = \"\") prayer_plot ggsave(\"figures/q59_prayer.png\", width = 20, height = 10, units = \"cm\") prayer_table <- chart_single_result_flextable(climate_experience_data_named\\)Q59, Count) prayer_table save_as_docx(prayer_table, path = 
“./figures/q59_prayer.docx”)\ndf <- select(climate_experience_data, Q52_bin, Q53_bin, Q57_bin, Q59) names(df) <- c(“Q52_bin”, “Q53_bin”, “Q57_bin”, “response”) facet_names <- c(Q52_bin = “Spirituality”, Q53_bin = “Politics L/R”, Q57_bin = “Religiosity”, low=“low”, medium=“medium”, high=“high”) facet_labeller <- function(variable,value){return(facet_names[value])} df\\(response <- factor(df\\)response, ordered = TRUE, levels = c(“1”, “2”, “3”, “4”, “5”)) df\\(response <- fct_recode(df\\)response, “More than once a week” = “1”, “Once a week” = “2”, “At least once a month” = “3”, “Only on special holy days” = “4”, “Never” = “5”) df %>% # we need to get the data including facet info in long format, so we use pivot_longer() pivot_longer(!response, names_to = “bin_name”, values_to = “b”) %>% # add counts for plot below count(response, bin_name, b) %>% group_by(bin_name,b) %>% mutate(perc=paste0(round(n*100/sum(n),1),“%”)) %>% # run ggplot ggplot(aes(x = n, y = ““, fill = response)) + geom_col(position=position_fill(), aes(fill=response)) + geom_text(aes(label = perc), position = position_fill(vjust=.5), size=2) + scale_fill_brewer(palette =”Dark2”, type = “qual”) + scale_x_continuous(labels = scales::percent_format()) + facet_grid(vars(b), vars(bin_name), labeller=as_labeller(facet_names)) + labs(caption = caption, x = ““, y =”“) + guides(fill = guide_legend(title = NULL)) ggsave(”figures/q59_faceted.png”, width = 30, height = 10, units = “cm”)\nq6_data <- qualtrics_process_single_multiple_choice_unsorted_streamlined(climate_experience_data$Q6)\ntitle <- “Do you think the climate is changing?”\nlevel_order <- c(“Don<80><99>t know”, “Definitely not changing”, “Probably not changing”, “Probably changing”, “Definitely changing”) ## code if a specific palette is needed for matching fill = wheel(ochre, num = as.integer(count(q6_data[1]))) # make plot q6_data_plot <- ggplot(q6_data, aes(x = n, y = response, fill = fill)) + geom_col(colour = “white”) + ## add percentage labels 
geom_text(aes(label = perc), ## make labels left-aligned and white hjust = 1, colour = “black”, size=4) + # use nudge_x = 30, to shift position ## reduce spacing between labels and bars scale_fill_identity(guide = “none”) + ## get rid of all elements except y axis labels + adjust plot margin theme_ipsum_rc() + theme(plot.margin = margin(rep(15, 4))) + easy_center_title() + # with thanks for helpful info on doing wrap here: https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2 scale_y_discrete(labels = wrap_format(30), limits = level_order) + theme(plot.title = element_text(size =18, hjust = 0.5), axis.text.y = element_text(size =16)) + labs(title = title, x = ““, y =”“)\nq6_data_plot\nggsave(“figures/q6.png”, width = 18, height = 12, units = “cm”)\nclimate_experience_data$Q51_score <- rowMeans(select(climate_experience_data, Q51_remote_vacation:Q51_heritage))\nclimate_experience_data <- climate_experience_data %>% mutate( Q51_bin = case_when( Q51_score > mean(Q51_score) + sd(Q51_score) ~ “high”, Q51_score < mean(Q51_score) - sd(Q51_score) ~ “low”, TRUE ~ “medium” ) %>% factor(levels = c(“low”, “medium”, “high”)) )\nclimate_experience_data$Q52_score <- rowMeans(select(climate_experience_data, Q52a_1:Q52f_1))\nclimate_experience_data <- climate_experience_data %>% mutate( Q52_bin = case_when( Q52_score > mean(Q52_score) + sd(Q52_score) ~ “high”, Q52_score < mean(Q52_score) - sd(Q52_score) ~ “low”, TRUE ~ “medium” ) %>% factor(levels = c(“low”, “medium”, “high”)) )"
+ "text": "4 Loading in some data\n# R Setup -----------------------------------------------------------------\nsetwd(\"/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion\")\nlibrary(here) |> suppressPackageStartupMessages()\nlibrary(tidyverse) |> suppressPackageStartupMessages()\n# used for importing SPSS .sav files\nlibrary(haven) |> suppressPackageStartupMessages()\nhere::i_am(\"chapter_2.qmd\")\n\nhere() starts at /Users/kidwellj/gits/hacking_religion_textbook/hacking_religion\n\nclimate_experience_data <- read_sav(here(\"example_data\", \"climate_experience_data.sav\"))\nThe first thing to note here is that we’ve drawn in a different type of data file, this time from an .sav file, usully produced by the statistics software package SPSS. This uses a different R Library (I use haven for this). The upside is that in some cases where you have survey data with both a code and a value like “1” is eqivalent to “very much agree” this will preserve both in the R dataframe that is created. Now that you’ve loaded in data, you have a new R dataframe called “climate_experience_data” with a lot of columns with just under 1000 survey responses.\nOne of the challenges we faced when running this study is how to gather responsible data from surveys regarding religious identity. We’ll dive into this in depth as we do analysis and look at some of the agreements and conflicts in terms of respondent attribution. Just to set the stage, we used the following kinds of question to ask about religion and spirituality:\nThis is one way of measuring religion, that is, to ask a person if they consider themselves formally affiliated with a particular group. 
This kind of question has some (serious) limitations, but we’ll get to that in a moment.\nWe also asked respondents (Q57): “Regardless of whether you belong to a particular religion, how religious would you say you are?” and then provided a slider from 0 (not religious at all) to 10 (very religious).\nWe included some classic indicators about how often respondents go to worship (Q58): Apart from weddings, funerals and other special occasions, how often do you attend religious services? and (Q59): “Q59 Apart from when you are at religious services, how often do you pray?”\nEach of these measures a particular kind of dimension, and it is interesting to note that sometimes there are stronger correlations between how often a person attends worship services (weekly versus once a year) and a particular view, than there is between their affiliation (if they are Christian or Pagan). We’ll do some exploratory work shortly to see how this is the case in our sample. We also included a series of questions about spirituality in Q52 and used a nature relatedness scale Q51.\nYou’ll find that many surveys will only use one of these forms of question and ignore the rest. I think this is a really bad idea as religious belonging, identity, and spirituality are far too complex to work off a single form of response. We can also test out how these different attributions relate to other demographic features, like interest in politics, economic attainment, etc.\nLet’s dive into the data and see how this all works out. We’ll start with the question 56 data, around religious affiliation:\nreligious_affiliation <- as_tibble(as_factor(climate_experience_data$Q56))\nnames(religious_affiliation) <- c(\"response\")\nreligious_affiliation <- filter(religious_affiliation, !is.na(response))\nThere are few things we need to do here to get the data into initial proper shape. 
This might be called “cleaning” the data:\nIf we pause at this point to view the data, you’ll see it’s basically just a long list of survey responses. What we need is a count of each unique response (or factor). This will take a few more steps:\nreligious_affiliation_sums <- religious_affiliation %>% \n1 dplyr::count(response, sort = TRUE) %>%\n2 dplyr::mutate(response = forcats::fct_rev(forcats::fct_inorder(response)))\nreligious_affiliation_sums <- religious_affiliation_sums %>% \n3 dplyr::mutate(perc = scales::percent(n / sum(n), accuracy = .1, trim = FALSE))\n\n\n1\n\nFirst we generate new a dataframe with sums per category and\n\n2\n\n…sort in descending order\n\n3\n\nThen we add new column with percentages based on the sums you’ve just generated\nThat should give us a tidy table of results, which you can see if you view the contents of our new religious_affiliation_sums dataframe:\nhead(religious_affiliation_sums)\n\n# A tibble: 6 x 3\n response n perc \n <fct> <int> <chr> \n1 Christian 342 \"33.9%\"\n2 Muslim 271 \"26.9%\"\n3 No religion 108 \"10.7%\"\n4 Hindu 72 \" 7.1%\"\n5 Atheist 54 \" 5.4%\"\n6 Spiritual but not religious 38 \" 3.8%\"\n# make plot\nggplot(religious_affiliation_sums, aes(x = n, y = response)) +\n geom_col(colour = \"white\") + \n ## add percentage labels\n geom_text(aes(label = perc),\n ## make labels left-aligned and white\n hjust = 1, nudge_x = -.5, colour = \"white\", size=3)\nI’ve added one feature to our chart that wasn’t in the bar charts in chapter 1, text labels with the actual value on each bar.\nYou may be thinking about the plots we’ve just finished in chapter 1 and wondering how they compare. 
Let’s use the same facet approach that we’ve just used to render this data in a subsetted way.\n# First we need to add in data on ethnic self-identification from our respondents:\ndf <- select(climate_experience_data, Q56, Q0)\nreligious_affiliation_ethnicity <- as_tibble(as_factor(df))\nnames(religious_affiliation_ethnicity) <- c(\"Religion\", \"Ethnicity\")\n\nreligious_affiliation_ethnicity_sums <- religious_affiliation_ethnicity %>% \n group_by(Ethnicity) %>%\n dplyr::count(Religion, sort = TRUE) %>%\n dplyr::mutate(Religion = forcats::fct_rev(forcats::fct_inorder(Religion)))\n\nplot1 <- ggplot(religious_affiliation_ethnicity_sums, aes(x = n, y = Religion)) +\n geom_col(colour = \"white\") + facet_wrap(~Ethnicity, scales=\"free_x\")\n\nggsave(\"chart.png\", plot=plot1, width = 8, height = 10, units=c(\"in\"))\nUse mutate to put “prefer not to say” at the bottom # Info here: https://r4ds.had.co.nz/factors.html#modifying-factor-levels\ncaption <- “Christian Denomination” # TODO: copy plot above for Q56 to add two additional plots using climate_experience_data_named\\(Q56b and climate_experience_data_named\\)Q56c # Religious Affiliation b - Christian Denomination Subquestion christian_denomination <- qualtrics_process_single_multiple_choice(climate_experience_data_named\\(Q56b) christian_denomination_table <- chart_single_result_flextable(climate_experience_data_named\\)Q56b, desc(Count)) christian_denomination_table save_as_docx(christian_denomination_table, path = “./figures/q56_religious_affiliation_xn_denomination.docx”)\nchristian_denomination_hi <- filter(climate_experience_data_named, Q56 == “Christian”, Q57_bin == “high”) christian_denomination_hi <- qualtrics_process_single_multiple_choice(christian_denomination_hi$Q56b) christian_denomination_hi\ncaption <- “Islamic Identity” # Should the label be different than income since the data examined is the Affiliation? 
# TODO: adjust plot to factor using numbered responses on this question (perhaps also above) religious_affiliationc <- qualtrics_process_single_multiple_choice(climate_experience_data_named\\(Q56c) religious_affiliationc_plot <- plot_horizontal_bar(religious_affiliationc) religious_affiliationc_plot <- religious_affiliationc_plot + labs(caption = caption, x = \"\", y = \"\") religious_affiliationc_plot ggsave(\"figures/q56c_religious_affiliation.png\", width = 20, height = 10, units = \"cm\") religious_affiliationc_table <- chart_single_result_flextable(climate_experience_data_named\\)Q56c, Count) religious_affiliationc_table save_as_docx(religious_affiliationc_table, path = “./figures/q56_religious_affiliation_islam.docx”)\ncaption <- “Respondent Religiosity” religiosity <- qualtrics_process_single_multiple_choice(as.character(climate_experience_data_named\\(Q57_1)) religiosity_plot <- plot_horizontal_bar(religiosity) religiosity_plot <- religiosity_plot + labs(caption = caption, x = \"\", y = \"\") religiosity_plot ggsave(\"figures/q57_religiosity_plot.png\", width = 20, height = 10, units = \"cm\") religiosity_table <- chart_single_result_flextable(climate_experience_data_named\\)Q57_1, desc(Variable)) religiosity_table save_as_docx(religious_affiliationc_table, path = “./figures/q57_religiousity.docx”)\ncaption <- “Respondent Attendance of Religious Services” religious_service_attend <- qualtrics_process_single_multiple_choice(climate_experience_data_named\\(Q58) religious_service_attend_plot <- plot_horizontal_bar(religious_service_attend) religious_service_attend_plot <- religious_service_attend_plot + labs(title = caption, x = \"\", y = \"\") religious_service_attend_plot ggsave(\"figures/q58_religious_service_attend.png\", width = 20, height = 10, units = \"cm\") religious_service_attend_table <- chart_single_result_flextable(climate_experience_data_named\\)Q58, Count) religious_service_attend_table save_as_docx(religious_service_attend_table, path = 
“./figures/q58_religious_service_attend.docx”)\ndf <- select(climate_experience_data, Q52_bin, Q53_bin, Q57_bin, Q58) names(df) <- c(“Q52_bin”, “Q53_bin”, “Q57_bin”, “response”) facet_names <- c(Q52_bin = “Spirituality”, Q53_bin = “Politics L/R”, Q57_bin = “Religiosity”, low=“low”, medium=“medium”, high=“high”) facet_labeller <- function(variable,value){return(facet_names[value])} df\\(response <- factor(df\\)response, ordered = TRUE, levels = c(“1”, “2”, “3”, “4”, “5”)) df\\(response <- fct_recode(df\\)response, “More than once a week” = “1”, “Once a week” = “2”, “At least once a month” = “3”, “Only on special holy days” = “4”, “Never” = “5”) df %>% # we need to get the data including facet info in long format, so we use pivot_longer() pivot_longer(!response, names_to = “bin_name”, values_to = “b”) %>% # add counts for plot below count(response, bin_name, b) %>% group_by(bin_name,b) %>% mutate(perc=paste0(round(n*100/sum(n),1),“%”)) %>% # run ggplot ggplot(aes(x = n, y = ““, fill = response)) + geom_col(position=position_fill(), aes(fill=response)) + geom_text(aes(label = perc), position = position_fill(vjust=.5), size=2) + scale_fill_brewer(palette =”Dark2”, type = “qual”) + scale_x_continuous(labels = scales::percent_format()) + facet_grid(vars(b), vars(bin_name), labeller=as_labeller(facet_names)) + labs(caption = caption, x = ““, y =”“) + guides(fill = guide_legend(title = NULL)) ggsave(”figures/q58_faceted.png”, width = 30, height = 10, units = “cm”)\ncaption <- “Respondent Prayer Outside of Religious Services” prayer <- qualtrics_process_single_multiple_choice(climate_experience_data_named\\(Q59) prayer_plot <- plot_horizontal_bar(prayer) prayer_plot <- prayer_plot + labs(caption = caption, x = \"\", y = \"\") prayer_plot ggsave(\"figures/q59_prayer.png\", width = 20, height = 10, units = \"cm\") prayer_table <- chart_single_result_flextable(climate_experience_data_named\\)Q59, Count) prayer_table save_as_docx(prayer_table, path = 
“./figures/q59_prayer.docx”)\ndf <- select(climate_experience_data, Q52_bin, Q53_bin, Q57_bin, Q59) names(df) <- c(“Q52_bin”, “Q53_bin”, “Q57_bin”, “response”) facet_names <- c(Q52_bin = “Spirituality”, Q53_bin = “Politics L/R”, Q57_bin = “Religiosity”, low=“low”, medium=“medium”, high=“high”) facet_labeller <- function(variable,value){return(facet_names[value])} df\\(response <- factor(df\\)response, ordered = TRUE, levels = c(“1”, “2”, “3”, “4”, “5”)) df\\(response <- fct_recode(df\\)response, “More than once a week” = “1”, “Once a week” = “2”, “At least once a month” = “3”, “Only on special holy days” = “4”, “Never” = “5”) df %>% # we need to get the data including facet info in long format, so we use pivot_longer() pivot_longer(!response, names_to = “bin_name”, values_to = “b”) %>% # add counts for plot below count(response, bin_name, b) %>% group_by(bin_name,b) %>% mutate(perc=paste0(round(n*100/sum(n),1),“%”)) %>% # run ggplot ggplot(aes(x = n, y = ““, fill = response)) + geom_col(position=position_fill(), aes(fill=response)) + geom_text(aes(label = perc), position = position_fill(vjust=.5), size=2) + scale_fill_brewer(palette =”Dark2”, type = “qual”) + scale_x_continuous(labels = scales::percent_format()) + facet_grid(vars(b), vars(bin_name), labeller=as_labeller(facet_names)) + labs(caption = caption, x = ““, y =”“) + guides(fill = guide_legend(title = NULL)) ggsave(”figures/q59_faceted.png”, width = 30, height = 10, units = “cm”)\nq6_data <- qualtrics_process_single_multiple_choice_unsorted_streamlined(climate_experience_data$Q6)\ntitle <- “Do you think the climate is changing?”\nlevel_order <- c(“Don<80><99>t know”, “Definitely not changing”, “Probably not changing”, “Probably changing”, “Definitely changing”) ## code if a specific palette is needed for matching fill = wheel(ochre, num = as.integer(count(q6_data[1]))) # make plot q6_data_plot <- ggplot(q6_data, aes(x = n, y = response, fill = fill)) + geom_col(colour = “white”) + ## add percentage labels 
geom_text(aes(label = perc), ## make labels left-aligned and white hjust = 1, colour = “black”, size=4) + # use nudge_x = 30, to shift position ## reduce spacing between labels and bars scale_fill_identity(guide = “none”) + ## get rid of all elements except y axis labels + adjust plot margin theme_ipsum_rc() + theme(plot.margin = margin(rep(15, 4))) + easy_center_title() + # with thanks for helpful info on doing wrap here: https://stackoverflow.com/questions/21878974/wrap-long-axis-labels-via-labeller-label-wrap-in-ggplot2 scale_y_discrete(labels = wrap_format(30), limits = level_order) + theme(plot.title = element_text(size =18, hjust = 0.5), axis.text.y = element_text(size =16)) + labs(title = title, x = ““, y =”“)\nq6_data_plot\nggsave(“figures/q6.png”, width = 18, height = 12, units = “cm”)\nclimate_experience_data$Q51_score <- rowMeans(select(climate_experience_data, Q51_remote_vacation:Q51_heritage))\nclimate_experience_data <- climate_experience_data %>% mutate( Q51_bin = case_when( Q51_score > mean(Q51_score) + sd(Q51_score) ~ “high”, Q51_score < mean(Q51_score) - sd(Q51_score) ~ “low”, TRUE ~ “medium” ) %>% factor(levels = c(“low”, “medium”, “high”)) )\nclimate_experience_data$Q52_score <- rowMeans(select(climate_experience_data, Q52a_1:Q52f_1))\nclimate_experience_data <- climate_experience_data %>% mutate( Q52_bin = case_when( Q52_score > mean(Q52_score) + sd(Q52_score) ~ “high”, Q52_score < mean(Q52_score) - sd(Q52_score) ~ “low”, TRUE ~ “medium” ) %>% factor(levels = c(“low”, “medium”, “high”)) )"
},
{
"objectID": "chapter_2.html#q57-subsetting-based-on-religiosity",
@@ -130,7 +130,7 @@
"href": "chapter_3.html",
"title": "4 Mapping churches: geospatial data science",
"section": "",
- "text": "Until recently, most data science books didn’t have a section on geospatial data. It was considered a specialist form of research best left to GIS technicians who tended to use proprietary tools like ArcGIS. This has changed significantly in the past five years, but you’ll still be hard pressed to find an introduction to the subject which strays very far from a few simple data sets (mostly of the USA) and relatively uncomplicated geospatial operations. I actually first began learning R, back in 2013, right when open source geospatial research tools were beginning to be developed with quite a lot more energy and geospatial data is my personal favourite data science playground, so in this book we’re going to go much deeper than is usual. There are also good reasons to take things a few steps further in the particular forms of data and inquiry that religion takes us into.\nRecommend https://r-spatial.org/book/\nGeospatial data is, in the most basic form, working with maps. This means that most of your data can be a quite simple dataframe, e.g. just a list of names or categories associated with a set of X and Y coordinates. Once you have a set of items, however, things get interesting very quickly, as you can layer data sets on top of one another. We’re going to begin this chapter by developing a geolocated data set of churches in the UK. This information is readily and freely available online thanks to the UK Ordnance Survey, a quasi-governmental agency which maintains the various (now digital) maps of Britain. Lucky for us, the Ordnance Survey has an open data product that anyone can use!\nBefore we begin, there are some key things we should note about geospatial data. Geospatial data tends to fall into one of two kinds: points and polygons. Points can be any kind of feature: a house, a church, a pub, someone’s favourite ancient oak tree, or some kind of sacred relic. 
Polygons tend to be associated with wider areas, and as such can be used to describe large features, e.g. an Ocean, a local authority, or a mountain, or also demographic features, like a census Output Area with associated census summaries. Points are very simple data representations, an X and Y coordinate. Polygons are much more complex, often containing dozens or even thousands of points.\nThe most complex aspect of point data relates to the ways that coordinates are encoded, as they will aways need to be associated with a coordinate reference system (CRS) which describes how they are situated with respect to the planet earth. The most common CRS is the WGS, though for our data sets we’ll also come into contact with the BGS, a specifically British coordinate reference system. There are dozens of CRS, usually mapping onto a specific geographical region. Bearing in mind the way that you need to use a CRS to understand how coordinates can be associated with specific parts of the earth, you can see how this is a bit like survey data, where you also need a “codebook” to understand what the specific response values map onto, e.g. a “1” means “strongly agree” and so on. We also saw, in a previous chapter, how some forms of data have the codebook already baked into the factor data, simplifying the process of interpreting the data. In a similar way, some types of geospatial data sets can also come with CRS “baked in” while we’ll need to define CRS for other types. Here are some of the most common types of geospatial data files:\nCSV: “comma separated values” a plain text file containing various coordinates Shapefile: a legacy file format, often still in use, but being replaced by others for a variety of good reasons. For more on this see [http://switchfromshapefile.org/] Geopackage: one of the more recent ways of packaging up geospatial data. Geopackages can contain a wide variety of different data and are easily portable. 
GeoJSON: a file format commonly used in other forms of coding, the “JSON” (an acronym for JavaScript Object Notation) is meant to be easily interchangeable across various platforms. GeoJSON is an augmented version of JSON data with coordinates added in.\nNow that you have a sense of some of the basic aspects of geospatial data, let’s dive in and do a bit of learning in action.\n\n5 Administrative shapes - the UK\nA good starting point is to aquire some adminstrative data. This is a way of referring to political boundaries, whether country borders or those of a local authority or some other administrative unit. For our purposes, we’re going to import several different types of administrative boundary which will be used at different points in our visualisations below. It’s worth noting that the data we use here was prepared to support the 2011 census, and make use of the shapefile format.\n\nlibrary(sf) |> suppressPackageStartupMessages()\nlibrary(here) |> suppressPackageStartupMessages()\nlibrary(tidyverse) |> suppressPackageStartupMessages()\nsetwd(\"/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion\")\nhere::i_am(\"chapter_3.qmd\")\n\nhere() starts at /Users/kidwellj/gits/hacking_religion_textbook/hacking_religion\n\n# Download administrative boundaries for whole UK at country level\nif (file.exists(here(\"data\", \"infuse_uk_2011_clipped.shp\")) == FALSE) {\ndownload.file(\"https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_uk_2011_clipped.zip\", destfile = \"data/infuse_uk_2011_clipped.zip\")\nunzip(\"data/infuse_uk_2011_clipped.zip\", exdir = \"data\")\n}\nuk_countries <- st_read(here(\"data\", \"infuse_uk_2011_clipped.shp\"))\n\nReading layer `infuse_uk_2011_clipped' from data source \n `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/infuse_uk_2011_clipped.shp' \n using driver `ESRI Shapefile'\nSimple feature collection with 1 feature and 3 fields\nGeometry type: MULTIPOLYGON\nDimension: 
XY\nBounding box: xmin: -69.1254 ymin: 5337.9 xmax: 655604.7 ymax: 1220302\nProjected CRS: OSGB36 / British National Grid\n\n# Download administrative boundaries for whole UK at regions level\nif (file.exists(here(\"data\", \"infuse_rgn_2011_clipped.shp\")) == FALSE) {\ndownload.file(\"https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_rgn_2011_clipped.zip\", destfile = \"data/infuse_rgn_2011_clipped.zip\")\nunzip(\"data/infuse_rgn_2011_clipped.zip\", exdir = \"data\")\n}\nuk_rgn <- st_read(here(\"data\", \"infuse_rgn_2011_clipped.shp\"))\n\nReading layer `infuse_rgn_2011_clipped' from data source \n `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/infuse_rgn_2011_clipped.shp' \n using driver `ESRI Shapefile'\nSimple feature collection with 9 features and 2 fields\nGeometry type: MULTIPOLYGON\nDimension: XY\nBounding box: xmin: 82672 ymin: 5337.9 xmax: 655604.7 ymax: 657534.1\nProjected CRS: OSGB36 / British National Grid\n\n# Download administrative boundaries for whole UK at local authority level\nif (file.exists(here(\"data\", \"infuse_dist_lyr_2011_clipped.shp\")) == FALSE) {\ndownload.file(\"https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_dist_lyr_2011_clipped.zip\", destfile = \"data/infuse_dist_lyr_2011_clipped.zip\")\nunzip(\"data/infuse_dist_lyr_2011_clipped.zip\", exdir = \"data\")\n}\nlocal_authorities <- st_read(here(\"data\", \"infuse_dist_lyr_2011_clipped.shp\"))\n\nReading layer `infuse_dist_lyr_2011_clipped' from data source \n `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/infuse_dist_lyr_2011_clipped.shp' \n using driver `ESRI Shapefile'\nSimple feature collection with 404 features and 3 fields\nGeometry type: MULTIPOLYGON\nDimension: XY\nBounding box: xmin: -69.1254 ymin: 5337.9 xmax: 655604.7 ymax: 1220302\nProjected CRS: OSGB36 / British National Grid\n\n# Download building outlines for whole UK\nif (file.exists(here(\"data\", 
\"infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg\")) == FALSE) {\n download.file(\"https://zenodo.org/record/6395804/files/infuse_dist_lyr_2011_simplified_100m_buildings_overlay_simplified.gpkg\", destfile = here(\"data\", \"infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg\"))}\nlocal_authorities_buildings_clip <- st_read(here(\"data\", \"infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg\"))\n\nReading layer `infuse_dist_lyr_2011_simplified_100_buildings_overlay_simplified' from data source `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg' \n using driver `GPKG'\nSimple feature collection with 403 features and 0 fields\nGeometry type: MULTIPOLYGON\nDimension: XY\nBounding box: xmin: -69.1254 ymin: 5524.797 xmax: 655986.4 ymax: 1219597\nProjected CRS: OSGB36 / British National Grid\n\n\nBefore we move on, let’s plot a simple map and have a look at one of our administrative layers. 
We can use ggplot with a new type of shape geom_sf() to plot the contents of a geospatial data file with polygons which is loaded as a simplefeature in R.\n\nggplot(uk_countries) +\n geom_sf()\n\n\n\n\n\n\n6 Load in Ordnance Survey OpenMap Points Data\n\n# Note: for more advanced reproducible scripts which demonstrate how these data surces have been \n# obtained, see the companion cookbook here: https://github.com/kidwellj/hacking_religion_cookbook/blob/main/ordnance_survey.R\n\nos_openmap_pow <- st_read(here(\"data\", \"os_openmap_pow.gpkg\"))\n\nReading layer `os_openmap_pow' from data source \n `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/os_openmap_pow.gpkg' \n using driver `GPKG'\nSimple feature collection with 48759 features and 5 fields\nGeometry type: POLYGON\nDimension: XY\nBounding box: xmin: 64594.12 ymin: 8287.54 xmax: 655238.1 ymax: 1214662\nProjected CRS: OSGB36 / British National Grid\n\nggplot(os_openmap_pow) +\n geom_sf()\n\n\n\n\nIt’s worth noting that the way that you load geospatial data in R has changed quite dramatically since 2020 with the introduction of the simplefeature class in R. 
Much of the documentation you will come across “out there” will make reference to a set of functions which are now deprecated.\nLet’s use that data we’ve just loaded to make our first map:\n\n# Generate choropleth map of respondent locations\n# using temporary palette here so that 0s are white\nlibrary(tmap) |> suppressPackageStartupMessages()\n# palette <- c(white, \"#a8ddb5\", \"#43a2ca\")\nmap1 <- tm_shape(local_authorities) + \n# tm_fill(col = \"surveys_count\", , palette = palette, title = \"Concentration of survey respondents\") +\n tm_borders(alpha=.5, lwd=0.1) +\n # for intermediate polygon geometries\n # tm_shape(local_authorities) +\n # tm_borders(lwd=0.6) +\n # for dots from original dataset\n # tm_dots(\"red\", size = .05, alpha = .4) +\n tm_scale_bar(position = c(\"right\", \"bottom\")) +\n tm_style(\"gray\") +\n tm_credits(\"Data: UK Data Service (OGL)\\n& Jeremy H. Kidwell,\\nGraphic is CC-by-SA 4.0\", \n size = 0.4, \n position = c(\"left\", \"bottom\"),\n just = c(\"left\", \"bottom\"),\n align = \"left\") +\n tm_layout(asp = NA,\n frame = FALSE, \n title = \"Figure 1a\", \n title.size = .7,\n legend.title.size = .7,\n inner.margins = c(0.1, 0.1, 0.05, 0.05)\n )\n\nmap1\n\n\n\n# save image\ntmap_save(map1, here(\"figures\", \"map.png\"), width=1920, height=1080, asp=0)\n\nMap saved to /Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/figures/map.png\n\n\nResolution: 1920 by 1080 pixels\n\n\nSize: 6.4 by 3.6 inches (300 dpi)\n\n\n\n# subsetting ordnance survey openmap data for measuring clusters and proximity\n\nos_openmap_important_buildings <- st_read(here(\"data\", \"os_openmap_important_buildings.gpkg\"))\n\nReading layer `important_buildings' from data source \n `/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/data/os_openmap_important_buildings.gpkg' \n using driver `GPKG'\nSimple feature collection with 229800 features and 5 fields\nGeometry type: POLYGON\nDimension: XY\nBounding box: xmin: 64594.12 ymin: 8125.44 
xmax: 655500.5 ymax: 1214662\nProjected CRS: OSGB36 / British National Grid\n\n# add pubs, check_cashing, pawnbrokers, SSSI\n## subsets\n\n\n# OSM data\n\n# Note: for more advanced reproducible scripts which demonstrate how these data surces have been \n# obtained, see the companion cookbook here: https://github.com/kidwellj/hacking_religion_cookbook/blob/main/ordnance_survey.R\n\n\n# osm_uk_points <- st_read(system.file(here(\"data\", \"pow_osm.gpkg\", package = \"spData\")))\n# vector_filepath = system.file(\"data/osm-gb-2018Aug29_pow_osm.pbf\", package = \"sf\")\n# osm_uk_points = st_read(vector_filepath)\n\nGuides to geographies: https://rconsortium.github.io/censusguide/ https://ocsi.uk/2019/03/18/lsoas-leps-and-lookups-a-beginners-guide-to-statistical-geographies/\nExtact places of worship from Ordnance survey open data set Calculate proximity to pubs\n\n\nReferences"
+ "text": "Until recently, most data science books didn’t have a section on geospatial data. It was considered a specialist form of research best left to GIS technicians who tended to use proprietary tools like ArcGIS. This has changed significantly in the past five years, but you’ll still be hard pressed to find an introduction to the subject which strays very far from a few simple data sets (mostly of the USA) and relatively uncomplicated geospatial operations. I actually first began learning R, back in 2013, right when open source geospatial research tools were beginning to be developed with quite a lot more energy and geospatial data is my personal favourite data science playground, so in this book we’re going to go much deeper than is usual. There are also good reasons to take things a few steps further in the particular forms of data and inquiry that religion takes us into.\nRecommend https://r-spatial.org/book/\nGeospatial data is, in the most basic form, working with maps. This means that most of your data can be a quite simple dataframe, e.g. just a list of names or categories associated with a set of X and Y coordinates. Once you have a set of items, however, things get interesting very quickly, as you can layer data sets on top of one another. We’re going to begin this chapter by developing a geolocated data set of churches in the UK. This information is readily and freely available online thanks to the UK Ordnance Survey, a quasi-governmental agency which maintains the various (now digital) maps of Britain. Lucky for us, the Ordnance Survey has an open data product that anyone can use!\nBefore we begin, there are some key things we should note about geospatial data. Geospatial data tends to fall into one of two kinds: points and polygons. Points can be any kind of feature: a house, a church, a pub, someone’s favourite ancient oak tree, or some kind of sacred relic. 
Polygons tend to be associated with wider areas, and as such can be used to describe large features, e.g. an Ocean, a local authority, or a mountain, or also demographic features, like a census Output Area with associated census summaries. Points are very simple data representations, an X and Y coordinate. Polygons are much more complex, often containing dozens or even thousands of points.\nThe most complex aspect of point data relates to the ways that coordinates are encoded, as they will always need to be associated with a coordinate reference system (CRS) which describes how they are situated with respect to the planet earth. The most common CRS is the WGS, though for our data sets we’ll also come into contact with the BGS, a specifically British coordinate reference system. There are dozens of CRS, usually mapping onto a specific geographical region. Bearing in mind the way that you need to use a CRS to understand how coordinates can be associated with specific parts of the earth, you can see how this is a bit like survey data, where you also need a “codebook” to understand what the specific response values map onto, e.g. a “1” means “strongly agree” and so on. We also saw, in a previous chapter, how some forms of data have the codebook already baked into the factor data, simplifying the process of interpreting the data. In a similar way, some types of geospatial data sets can also come with CRS “baked in” while we’ll need to define CRS for other types. Here are some of the most common types of geospatial data files:\nCSV: “comma separated values” a plain text file containing various coordinates Shapefile: a legacy file format, often still in use, but being replaced by others for a variety of good reasons. For more on this see [http://switchfromshapefile.org/] Geopackage: one of the more recent ways of packaging up geospatial data. Geopackages can contain a wide variety of different data and are easily portable. 
GeoJSON: a file format commonly used in other forms of coding, the “JSON” (an acronym for JavaScript Object Notation) is meant to be easily interchangeable across various platforms. GeoJSON is an augmented version of JSON data with coordinates added in.\nNow that you have a sense of some of the basic aspects of geospatial data, let’s dive in and do a bit of learning in action.\n\n5 Administrative shapes - the UK\nA good starting point is to acquire some administrative data. This is a way of referring to political boundaries, whether country borders or those of a local authority or some other administrative unit. For our purposes, we’re going to import several different types of administrative boundary which will be used at different points in our visualisations below. It’s worth noting that the data we use here was prepared to support the 2011 census, and make use of the shapefile format.\n\nlibrary(sf) |> suppressPackageStartupMessages()\nlibrary(here) |> suppressPackageStartupMessages()\nlibrary(tidyverse) \n\n-- Attaching core tidyverse packages ------------------------ tidyverse 2.0.0 --\nv dplyr 1.1.3 v readr 2.1.4\nv forcats 1.0.0 v stringr 1.5.0\nv ggplot2 3.4.3 v tibble 3.2.1\nv lubridate 1.9.3 v tidyr 1.3.0\nv purrr 1.0.2 \n-- Conflicts ------------------------------------------ tidyverse_conflicts() --\nx dplyr::filter() masks stats::filter()\nx dplyr::lag() masks stats::lag()\ni Use the conflicted package (<http://conflicted.r-lib.org/>) to force all conflicts to become errors\n\n# better video device, more accurate and faster rendering, esp. on macos. 
Also should enable system fonts for display\nlibrary(ragg) |> suppressPackageStartupMessages()\n\nsetwd(\"/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion\")\nhere::i_am(\"chapter_3.qmd\")\n\nhere() starts at /Users/kidwellj/gits/hacking_religion_textbook/hacking_religion\n\n# Download administrative boundaries for whole UK at country level\nif (file.exists(here(\"data\", \"infuse_uk_2011_clipped.shp\")) == FALSE) {\ndownload.file(\"https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_uk_2011_clipped.zip\", destfile = \"data/infuse_uk_2011_clipped.zip\")\nunzip(\"data/infuse_uk_2011_clipped.zip\", exdir = \"data\")\n}\nuk_countries <- st_read(here(\"data\", \"infuse_uk_2011_clipped.shp\"), quiet = TRUE)\n\n# Download administrative boundaries for whole UK at regions level\nif (file.exists(here(\"data\", \"infuse_rgn_2011_clipped.shp\")) == FALSE) {\ndownload.file(\"https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_rgn_2011_clipped.zip\", destfile = \"data/infuse_rgn_2011_clipped.zip\")\nunzip(\"data/infuse_rgn_2011_clipped.zip\", exdir = \"data\")\n}\nuk_rgn <- st_read(here(\"data\", \"infuse_rgn_2011_clipped.shp\"), quiet = TRUE)\n\n# Download administrative boundaries for whole UK at local authority level\nif (file.exists(here(\"data\", \"infuse_dist_lyr_2011_clipped.shp\")) == FALSE) {\ndownload.file(\"https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_dist_lyr_2011_clipped.zip\", destfile = \"data/infuse_dist_lyr_2011_clipped.zip\")\nunzip(\"data/infuse_dist_lyr_2011_clipped.zip\", exdir = \"data\")\n}\nlocal_authorities <- st_read(here(\"data\", \"infuse_dist_lyr_2011_clipped.shp\"), quiet = TRUE)\n\n# Download building outlines for whole UK\nif (file.exists(here(\"data\", \"infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg\")) == FALSE) {\n 
download.file(\"https://zenodo.org/record/6395804/files/infuse_dist_lyr_2011_simplified_100m_buildings_overlay_simplified.gpkg\", destfile = here(\"data\", \"infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg\"))}\nlocal_authorities_buildings_clip <- st_read(here(\"data\", \"infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg\"), quiet = TRUE)\n\nBefore we move on, let’s plot a simple map and have a look at one of our administrative layers. We can use ggplot with a new type of shape geom_sf() to plot the contents of a geospatial data file with polygons which is loaded as a simplefeature in R.\n\nlibrary(bench) |> suppressPackageStartupMessages()\nbench_time(ggplot(uk_countries) + geom_sf())\n\nprocess real \n 6.83ms 7.05ms \n\n\n\n\n6 Load in Ordnance Survey OpenMap Points Data\n\n# Note: for more advanced reproducible scripts which demonstrate how these data surces have been \n# obtained, see the companion cookbook here: https://github.com/kidwellj/hacking_religion_cookbook/blob/main/ordnance_survey.R\n\nos_openmap_pow <- st_read(here(\"data\", \"os_openmap_pow.gpkg\"), quiet = TRUE)\n\nbench_time(ggplot(os_openmap_pow) + geom_sf())\n\nprocess real \n 1.17ms 1.15ms \n\n\nIt’s worth noting that the way that you load geospatial data in R has changed quite dramatically since 2020 with the introduction of the simplefeature class in R. 
Much of the documentation you will come across “out there” will make reference to a set of functions which are now deprecated.\nLet’s use that data we’ve just loaded to make our first map:\n\n# Generate choropleth map of respondent locations\n# using temporary palette here so that 0s are white\nlibrary(tmap) |> suppressPackageStartupMessages()\n# palette <- c(white, \"#a8ddb5\", \"#43a2ca\")\n\nmap1 <- tm_shape(local_authorities) + \n# tm_fill(col = \"surveys_count\", , palette = palette, title = \"Concentration of survey respondents\") +\n tm_borders(alpha=.5, lwd=0.1) +\n # for intermediate polygon geometries\n # tm_shape(local_authorities) +\n # tm_borders(lwd=0.6) +\n # for dots from original dataset\n # tm_dots(\"red\", size = .05, alpha = .4) +\n tm_scale_bar(position = c(\"right\", \"bottom\")) +\n tm_style(\"gray\") +\n tm_credits(\"Data: UK Data Service (OGL)\\n& Jeremy H. Kidwell,\\nGraphic is CC-by-SA 4.0\", \n size = 0.4, \n position = c(\"left\", \"bottom\"),\n just = c(\"left\", \"bottom\"),\n align = \"left\") +\n tm_layout(asp = NA,\n frame = FALSE, \n title = \"Figure 1a\", \n title.size = .7,\n legend.title.size = .7,\n inner.margins = c(0.1, 0.1, 0.05, 0.05)\n )\n\nmap1\n\n\n\n# save image\ntmap_save(map1, here(\"figures\", \"map.png\"), width=1920, height=1080, asp=0)\n\nMap saved to /Users/kidwellj/gits/hacking_religion_textbook/hacking_religion/figures/map.png\n\n\nResolution: 1920 by 1080 pixels\n\n\nSize: 6.4 by 3.6 inches (300 dpi)\n\n\n\n# subsetting ordnance survey openmap data for measuring clusters and proximity\n\nos_openmap_important_buildings <- st_read(here(\"data\", \"os_openmap_important_buildings.gpkg\"), quiet = TRUE)\n\n# add pubs, check_cashing, pawnbrokers, SSSI\n## subsets\n\n\nCount the number of churches in Local Authorities\n\n\n# OSM data\n\n# Note: for more advanced reproducible scripts which demonstrate how these data surces have been \n# obtained, see the companion cookbook here: 
https://github.com/kidwellj/hacking_religion_cookbook/blob/main/ordnance_survey.R\n\n\n# osm_uk_points <- st_read(system.file(here(\"data\", \"pow_osm.gpkg\", package = \"spData\")))\n# vector_filepath = system.file(\"data/osm-gb-2018Aug29_pow_osm.pbf\", package = \"sf\")\n# osm_uk_points = st_read(vector_filepath)\n\nGuides to geographies: https://rconsortium.github.io/censusguide/ https://ocsi.uk/2019/03/18/lsoas-leps-and-lookups-a-beginners-guide-to-statistical-geographies/\nCalculate proximity to pubs\n\n\nReferences"
},
{
"objectID": "chapter_4.html",
diff --git a/hacking_religion/chapter_1.qmd b/hacking_religion/chapter_1.qmd
index 04fa4e0..f9f610c 100644
--- a/hacking_religion/chapter_1.qmd
+++ b/hacking_religion/chapter_1.qmd
@@ -9,7 +9,7 @@ Let's start by importing some data into R. Because R is what is called an object
In the example below, we're going to read in data from a comma separated value file ("csv") which has rows of information on separate lines in a text file with each column separated by a comma. This is one of the standard plain text file formats. R has a function you can use to import this efficiently called "read.csv". Each line of code in R usually starts with the object, and then follows with instructions on what we're going to put inside it, where that comes from, and how to format it:
-```{r}
+```{r, results = 'hide'}
#| include: true
#| label: fig-polar
setwd("/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion")
@@ -92,8 +92,8 @@ barplot(height=df$value, names=df$key)
### GGPlot
```{r}
-ggplot(uk_census_2021_religion_wmids, aes(x = key, y = value)) + # <1>
- geom_bar(stat = "identity") # <1>
+ggplot(uk_census_2021_religion_wmids, aes(x = key, y = value)) + # <1>
+ geom_bar(stat = "identity") # <1>
ggplot(uk_census_2021_religion_wmids, aes(x= reorder(key,-value),value)) + geom_bar(stat ="identity") # <2>
```
@@ -215,7 +215,6 @@ If you want to draw some data from the nomis platform yourself in R, have a look
```{r}
# Get table of Census 2011 religion data from nomis
-# Note: for reproducible code used to generate the dataset used in the book, see the cookbook here:
z <- readRDS(file = (here("example_data", "z.rds")))
# Filter down to simplified dataset with England / Wales and percentages without totals
diff --git a/hacking_religion/chapter_2.qmd b/hacking_religion/chapter_2.qmd
index 2f98351..063db21 100644
--- a/hacking_religion/chapter_2.qmd
+++ b/hacking_religion/chapter_2.qmd
@@ -10,12 +10,13 @@ We've decided to open up access to our data and I'm highlighting it in this book
# Loading in some data
-```{r}
+```{r, results = 'hide'}
# R Setup -----------------------------------------------------------------
setwd("/Users/kidwellj/gits/hacking_religion_textbook/hacking_religion")
-library(here)
-library(tidyverse)
-library(haven) # used for importing SPSS .sav files
+library(here) |> suppressPackageStartupMessages()
+library(tidyverse) |> suppressPackageStartupMessages()
+# used for importing SPSS .sav files
+library(haven) |> suppressPackageStartupMessages()
here::i_am("chapter_2.qmd")
climate_experience_data <- read_sav(here("example_data", "climate_experience_data.sav"))
```
diff --git a/hacking_religion/chapter_3.qmd b/hacking_religion/chapter_3.qmd
index b20f899..d6b7893 100644
--- a/hacking_religion/chapter_3.qmd
+++ b/hacking_religion/chapter_3.qmd
@@ -21,7 +21,7 @@ Now that you have a sense of some of the basic aspects of geospatial data, let's
A good starting point is to aquire some adminstrative data. This is a way of referring to political boundaries, whether country borders or those of a local authority or some other administrative unit. For our purposes, we're going to import several different types of administrative boundary which will be used at different points in our visualisations below. It's worth noting that the data we use here was prepared to support the 2011 census, and make use of the shapefile format.
-```{R}
+```{r, results = 'hide'}
library(sf) |> suppressPackageStartupMessages()
library(here) |> suppressPackageStartupMessages()
library(tidyverse)
@@ -36,46 +36,43 @@ if (file.exists(here("data", "infuse_uk_2011_clipped.shp")) == FALSE) {
download.file("https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_uk_2011_clipped.zip", destfile = "data/infuse_uk_2011_clipped.zip")
unzip("data/infuse_uk_2011_clipped.zip", exdir = "data")
}
-uk_countries <- st_read(here("data", "infuse_uk_2011_clipped.shp"))
+uk_countries <- st_read(here("data", "infuse_uk_2011_clipped.shp"), quiet = TRUE)
# Download administrative boundaries for whole UK at regions level
if (file.exists(here("data", "infuse_rgn_2011_clipped.shp")) == FALSE) {
download.file("https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_rgn_2011_clipped.zip", destfile = "data/infuse_rgn_2011_clipped.zip")
unzip("data/infuse_rgn_2011_clipped.zip", exdir = "data")
}
-uk_rgn <- st_read(here("data", "infuse_rgn_2011_clipped.shp"))
+uk_rgn <- st_read(here("data", "infuse_rgn_2011_clipped.shp"), quiet = TRUE)
# Download administrative boundaries for whole UK at local authority level
if (file.exists(here("data", "infuse_dist_lyr_2011_clipped.shp")) == FALSE) {
download.file("https://borders.ukdataservice.ac.uk/ukborders/easy_download/prebuilt/shape/infuse_dist_lyr_2011_clipped.zip", destfile = "data/infuse_dist_lyr_2011_clipped.zip")
unzip("data/infuse_dist_lyr_2011_clipped.zip", exdir = "data")
}
-local_authorities <- st_read(here("data", "infuse_dist_lyr_2011_clipped.shp"))
+local_authorities <- st_read(here("data", "infuse_dist_lyr_2011_clipped.shp"), quiet = TRUE)
# Download building outlines for whole UK
if (file.exists(here("data", "infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg")) == FALSE) {
download.file("https://zenodo.org/record/6395804/files/infuse_dist_lyr_2011_simplified_100m_buildings_overlay_simplified.gpkg", destfile = here("data", "infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg"))}
-local_authorities_buildings_clip <- st_read(here("data", "infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg"))
+local_authorities_buildings_clip <- st_read(here("data", "infuse_dist_lyr_2011_simplified_100m_buildings_simplified.gpkg"), quiet = TRUE)
```
Before we move on, let's plot a simple map and have a look at one of our administrative layers. We can use ggplot with a new type of shape `geom_sf()` to plot the contents of a geospatial data file with polygons which is loaded as a `simplefeature` in R.
```{r}
-ggplot(uk_countries) +
- geom_sf()
+library(bench) |> suppressPackageStartupMessages()
+bench_time(ggplot(uk_countries) + geom_sf())
```
# Load in Ordnance Survey OpenMap Points Data
```{r}
-
# Note: for more advanced reproducible scripts which demonstrate how these data surces have been
# obtained, see the companion cookbook here: https://github.com/kidwellj/hacking_religion_cookbook/blob/main/ordnance_survey.R
-os_openmap_pow <- st_read(here("data", "os_openmap_pow.gpkg"))
-
-ggplot(os_openmap_pow) +
- geom_sf()
+os_openmap_pow <- st_read(here("data", "os_openmap_pow.gpkg"), quiet = TRUE)
+bench_time(ggplot(os_openmap_pow) + geom_sf())
```
It's worth noting that the way that you load geospatial data in R has changed quite dramatically since 2020 with the introduction of the simplefeature class in R. Much of the documentation you will come across "out there" will make reference to a set of functions which are now deprecated.
@@ -87,6 +84,7 @@ Let's use that data we've just loaded to make our first map:
# using temporary palette here so that 0s are white
library(tmap) |> suppressPackageStartupMessages()
# palette <- c(white, "#a8ddb5", "#43a2ca")
+
map1 <- tm_shape(local_authorities) +
# tm_fill(col = "surveys_count", , palette = palette, title = "Concentration of survey respondents") +
tm_borders(alpha=.5, lwd=0.1) +
@@ -116,14 +114,15 @@ map1
tmap_save(map1, here("figures", "map.png"), width=1920, height=1080, asp=0)
```
-```{r}
+```{r, results = 'hide'}
# subsetting ordnance survey openmap data for measuring clusters and proximity
-os_openmap_important_buildings <- st_read(here("data", "os_openmap_important_buildings.gpkg"))
+os_openmap_important_buildings <- st_read(here("data", "os_openmap_important_buildings.gpkg"), quiet = TRUE)
# add pubs, check_cashing, pawnbrokers, SSSI
## subsets
```
+1. Count the number of churches in Local Authorities
```{r}