diff --git a/.jekyll-cache/Jekyll/Cache/Jekyll--Cache/b7/9606fb3afea5bd1609ed40b622142f1c98125abcfe89a76a661b0e8e343910 b/.jekyll-cache/Jekyll/Cache/Jekyll--Cache/b7/9606fb3afea5bd1609ed40b622142f1c98125abcfe89a76a661b0e8e343910 index cce5c46..b2f4f5b 100644 --- a/.jekyll-cache/Jekyll/Cache/Jekyll--Cache/b7/9606fb3afea5bd1609ed40b622142f1c98125abcfe89a76a661b0e8e343910 +++ b/.jekyll-cache/Jekyll/Cache/Jekyll--Cache/b7/9606fb3afea5bd1609ed40b622142f1c98125abcfe89a76a661b0e8e343910 @@ -1 +1 @@ -I"Y{"source"=>"C:/Users/lizmo/Documents/GitHub/liz-muehlmann.github.io", "destination"=>"C:/Users/lizmo/Documents/GitHub/liz-muehlmann.github.io/_site", "collections_dir"=>"", "cache_dir"=>".jekyll-cache", "plugins_dir"=>"_plugins", "layouts_dir"=>"_layouts", "data_dir"=>"_data", "includes_dir"=>"_includes", "collections"=>{"posts"=>{"output"=>true, "permalink"=>"/:categories/:year/:month/:day/:title:output_ext"}}, "safe"=>false, "include"=>[".htaccess"], "exclude"=>[".sass-cache", ".jekyll-cache", "gemfiles", "Gemfile", "Gemfile.lock", "node_modules", "vendor/bundle/", "vendor/cache/", "vendor/gems/", "vendor/ruby/"], "keep_files"=>[".git", ".svn"], "encoding"=>"utf-8", "markdown_ext"=>"markdown,mkdown,mkdn,mkd,md", "strict_front_matter"=>false, "show_drafts"=>nil, "limit_posts"=>0, "future"=>false, "unpublished"=>false, "whitelist"=>[], "plugins"=>[], "markdown"=>"kramdown", "highlighter"=>"rouge", "lsi"=>false, "excerpt_separator"=>"\n\n", "incremental"=>false, "detach"=>false, "port"=>"4000", "host"=>"127.0.0.1", "baseurl"=>"/", "show_dir_listing"=>false, "permalink"=>"date", "paginate_path"=>"/page:num", "timezone"=>nil, "quiet"=>false, "verbose"=>false, "defaults"=>[], "liquid"=>{"error_mode"=>"warn", "strict_filters"=>false, "strict_variables"=>false}, "kramdown"=>{"auto_ids"=>true, "toc_levels"=>[1, 2, 3, 4, 5, 6], "entity_output"=>"as_char", "smart_quotes"=>"lsquo,rsquo,ldquo,rdquo", "input"=>"GFM", "hard_wrap"=>false, "guess_lang"=>true, "footnote_nr"=>1, "show_warnings"=>false}, "title"=>"Liz Muehlmann", "email"=>"liz.muehlmann@uci.edu", "description"=>"Academic website for Liz Muehlmann. 
PhD Candidate in Political Science at the University of Irvine, California.", "url"=>"http://localhost:4000", "twitter_username"=>"@hello_iamliz", "github_username"=>"liz-muehlmann", "author"=>"Liz", "livereload_port"=>35729, "serving"=>true, "watch"=>true}:ET \ No newline at end of file +I"Ó{"source"=>"C:/Users/lizmo/Documents/GitHub/liz-muehlmann.github.io", "destination"=>"C:/Users/lizmo/Documents/GitHub/liz-muehlmann.github.io/_site", "collections_dir"=>"", "cache_dir"=>".jekyll-cache", "plugins_dir"=>"_plugins", "layouts_dir"=>"_layouts", "data_dir"=>"_data", "includes_dir"=>"_includes", "collections"=>{"posts"=>{"output"=>true, "permalink"=>"/:categories/:year/:month/:day/:title:output_ext"}}, "safe"=>false, "include"=>[".htaccess"], "exclude"=>[".sass-cache", ".jekyll-cache", "gemfiles", "Gemfile", "Gemfile.lock", "node_modules", "vendor/bundle/", "vendor/cache/", "vendor/gems/", "vendor/ruby/"], "keep_files"=>[".git", ".svn"], "encoding"=>"utf-8", "markdown_ext"=>"markdown,mkdown,mkdn,mkd,md", "strict_front_matter"=>false, "show_drafts"=>nil, "limit_posts"=>0, "future"=>false, "unpublished"=>false, "whitelist"=>[], "plugins"=>[], "markdown"=>"kramdown", "highlighter"=>"rouge", "lsi"=>false, "excerpt_separator"=>"\n\n", "incremental"=>false, "detach"=>false, "port"=>"4000", "host"=>"127.0.0.1", "baseurl"=>"/", "show_dir_listing"=>false, "permalink"=>"date", "paginate_path"=>"/page:num", "timezone"=>nil, "quiet"=>false, "verbose"=>false, "defaults"=>[], "liquid"=>{"error_mode"=>"warn", "strict_filters"=>false, "strict_variables"=>false}, "kramdown"=>{"auto_ids"=>true, "toc_levels"=>[1, 2, 3, 4, 5, 6], "entity_output"=>"as_char", "smart_quotes"=>"lsquo,rsquo,ldquo,rdquo", "input"=>"GFM", "hard_wrap"=>false, "guess_lang"=>true, "footnote_nr"=>1, "show_warnings"=>false, "syntax_highlighter"=>"rouge", "syntax_highlighter_opts"=>{:default_lang=>"plaintext", :guess_lang=>true}, "coderay"=>{}}, "title"=>"Liz Muehlmann", "email"=>"liz.muehlmann@uci.edu", "description"=>"Academic website for Liz Muehlmann. PhD Candidate in Political Science at the University of Irvine, California.", "url"=>"http://localhost:4000", "twitter_username"=>"@hello_iamliz", "github_username"=>"liz-muehlmann", "author"=>"Liz", "livereload_port"=>35729, "serving"=>true, "watch"=>true}:ET \ No newline at end of file diff --git a/.jekyll-cache/Jekyll/Cache/Jekyll--Converters--Markdown/24/9e63b80ffcf088c2da4cb29077ed69bcfc2bacd2b5c445136ac44f82e08a71 b/.jekyll-cache/Jekyll/Cache/Jekyll--Converters--Markdown/24/9e63b80ffcf088c2da4cb29077ed69bcfc2bacd2b5c445136ac44f82e08a71 deleted file mode 100644 index b8ba13b..0000000 --- a/.jekyll-cache/Jekyll/Cache/Jekyll--Converters--Markdown/24/9e63b80ffcf088c2da4cb29077ed69bcfc2bacd2b5c445136ac44f82e08a71 +++ /dev/null @@ -1,478 +0,0 @@ -I"X›
Twitter is a great resource for engaging with the academic community. For example, I saw this Tweet by PhD Genie asking users to name one positive skill learned during their PhD. I love this question for a number of reasons. First, it helps PhDs reframe their experience so it’s applicable outside of academia - which can help when applying to jobs. Second, it’s really cool to see what skills other people have learned during their program.
-
I responded to the tweet because during my PhD I learned how to create maps in R. I started by recreating a map from the University of North Carolina’s Hussman School of Journalism’s News Deserts project (below). Now, I am working on a personal project mapping the U.S. National and State parks.
- - - -There was quite a bit of interest in how to do this, so in this series of posts I will document my process from start to finish.
- -First, I’m not an expert. I wanted to make a map, so I learned how. There may be easier ways and, if I learn how to do them, I’ll write another post.
- -Second, before starting, I strongly suggest setting up GitHub and DVC. I wrote about how to use GitHub, the GitHub website, and GitHub Desktop. You can use any of these methods to manage your repositories. I use all three based purely on whatever mood I’m in.
- -If you do use Git or GitHub, then DVC (Data Version Control) is practically mandatory. GitHub will warn you that a file is too large if it’s over 50MB and reject your pushes if any file is over 100MB. The total repository size can’t exceed 2GB if you’re using the free version (which I am). DVC is useful because cartography files are large: they contain a lot of coordinates, and the file size grows with each location you try to map. DVC stores your data outside of GitHub while still letting you track changes to it. It’s super useful.
- -Third, there are several ways to make a map. R is capable of making interactive maps and static maps. Static maps are less computationally expensive and better for publication. Interactive maps are prettier and better for displaying on the web.
- -I make interactive maps with Leaflet and Shiny because they offer a lot of functionality. The most common way is to use map tiles. Map tiles use data from sources like OpenStreetMap to create map squares (tiles) with custom data layered on top. A list of available map tiles can be found on the OpenStreetMap website.
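For a sense of what that looks like in practice, here is a bare-bones tile-only map (a minimal sketch - the tile provider and coordinates are arbitrary examples, not part of this project):

 ## a minimal interactive map built from tiles alone
 library("leaflet")

 leaflet() %>%
   addProviderTiles(providers$CartoDB.Positron) %>%  # any provider from the tile list works here
   setView(lng = -98.5, lat = 39.8, zoom = 4)        # roughly centered on the continental US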
- - - -When I make static maps (like the US map pictured above), I use ggplot
- -I had to break the tutorial into different parts because it became unwieldy. I list the component parts below. The annotated version of the code can be found in this project’s repository in the folder called r files
- - -III. cartography in r part three
-IV. cartography in r part four
-You only need to install the packages once. You can do so by running each line in the terminal. When you rerun the code later, you can skip right to loading the packages using library("package-name")
1
-2
-3
-4
-5
-6
-7
-8
-
## you only need to install the packages once
-
- install.packages("leaflet") # interactive maps
- install.packages("shiny") # added map functionality
- install.packages("tidyverse") # data manipulation
- install.packages("tigris") # cartographic boundaries
- install.packages("operator.tools") # for the not-in function
- install.packages("sf") # read and write shapefiles
-
Leaflet maps are built in layers: you start with a call to leaflet() and then add layers with functions like addTiles(), addPolygons(), or addMarkers().
- The Tidyverse is a collection of packages used for data manipulation and analysis. Its syntax is more intuitive than base R. Furthermore, you can chain (aka pipe) commands together.
- -For cartography, you don’t need the whole Tidyverse. We’ll mainly use dplyr
and ggplot
. You can install these packages individually instead of installing the whole tidyverse, though when we get to the national park database we’ll also need purrr
and tidyr
.
operator.tools is not required, but it’s recommended.
- -For some unknown reason, base R has a %in%
function but not a not-in
function. Unfortunately, the United States is still an empire with its associated areas, islands, and pseudo-states. I only want to include the 50 states, so I needed a way to easily filter out the non-states. operator.tools’ %!in%
function is perfect for that.
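As a quick illustration (a minimal sketch - state.abb is a built-in base R vector of the 50 state abbreviations, and the not-in operator comes from operator.tools):

 ## %in% asks whether each value is in the set; %!in% asks the opposite
 library("operator.tools")

 c("CA", "PR", "GU") %in% state.abb    # TRUE FALSE FALSE
 c("CA", "PR", "GU") %!in% state.abb   # FALSE TRUE TRUE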
To start, create and save a new file called usa.r
. In it, we’re going to download and modify the United States shape data that we’ll use to create the base map in part two of this series.
At the beginning of each file, you have to load the necessary packages. In this file, the only packages we need to load are tidyverse, sf, and tigris. I also load leaflet to make sure the map renders correctly.
- -1
-2
-3
-4
-5
-
## load libraries
- library("tidyverse")
- library("sf")
- library("tigris")
- library("leaflet")
-
There are two ways to download the USA shape data. First, we can use the R package tigris. Second, we can download it from the Census website.
- -I prefer using tigris but I’ve been having some problems with it. Sometimes it ignores the Great Lakes and merges Michigan and Wisconsin into a Frankenstate (boxed in red below).
- - - -tigris()
downloads TIGER/Line shapefile data directly from the Census and includes a treasure trove of information: land area, water area, state names, geometry, and more.
Tigris can also download boundaries for counties, divisions, regions, tracts, blocks, congressional and school districts, and a whole host of other groupings. A complete list of available data can be found on the package’s GitHub page.
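As an aside, pulling one of those other levels works the same way. A quick sketch (the state and year here are arbitrary choices, not something this tutorial needs):

 ## example: generalized county boundaries for a single state
 ca_counties <- tigris::counties(state = "CA", cb = TRUE, year = 2020)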
- -1
-2
-3
-4
-5
-6
-7
-8
-9
-
## download state data using tigris()
- us_states <- tigris::states(cb = FALSE, year = 2020) %>%
- filter(STATEFP < 57) %>%
- shift_geometry(preserve_area = FALSE,
- position = "below") %>%
- sf::st_transform("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
-
- ## save the shifted shapefile
- st_write(us_states, "path/to/file/usa.shp")
-
Here we create the us_states
variable, save the geographic data to it, move Alaska and Hawaii so they’re beneath the continental US, and save the shifted shapefile.
R uses the <-
operator to define new variables. Here, we’re naming our new variable us_states
.
In our us_states
variable we’re going to store data on the 50 states downloaded using tigris
. Within (::
) tigris, we’re going to use the states()
function.
The states()
function allows you to pull state-level data from the Census. This function takes several arguments
The cb
argument can either be TRUE
or FALSE
. If cb = FALSE
tells Tigris() to download the most detailed shapefile. If cb = TRUE
it will download a generalized (1:5000k) file. After a lot of trial and error, I found that using cb = TRUE
prevents the Frankenstate from happening.
If the year
argument is omitted it will download the shapefile for the default year (currently 2020). I set out of habit from when I work with county boundaries. When I work with county boundaries I have to set the year because their boundaries change more than states.
Finally, the %>%
operator is part of the Tidyverse. It basically tells R “Hey! I’m not done, keep going to the next line!”
tigris::states()
downloads data for the 50 states and the United States’ minor outlying islands, Puerto Rico, and its associated territories. Each state and territory is assigned a unique two-digit Federal Information Processing Standard [FIPS] code.
They’re mostly consecutive (Alabama is 01), but when the codes were conceived in the 1970s a couple were reserved for the US territories (American Samoa was 03). In the updated version the “reserved codes” were left out and the territories were assigned new numbers (American Samoa is now 60). The important bit is that the last official state alphabetically (Wyoming) has a FIPS code of 56.
- -This line of code uses the filter()
function on the STATEFP
variable downloaded using tigris. All it says is: keep any row that has a FIPS code less than 57. This keeps only the 50 states and excludes the associated territories of the United States’ empire.
The shift_geometry()
function is from the tigris package. It takes two arguments: preserve_area
and position
.
When preserve_area = FALSE
tigris will shrink Alaska’s size and increase Hawaii’s so that they are comparable to the size of the other states.
The position
argument can either be "below"
or "outside"
. When it’s below
, both Alaska and Hawaii are moved to be below California. When it’s outside
then Alaska is moved to be near Washington and Hawaii is moved to be near California.
Since I’m a born theorist, I should warn you that messing with maps has inherent normative implications. The most common projection is Mercator, which stretches the continents near the poles and squishes the ones near the equator.
- - - -One of the competing projections is Gall-Peters, which claims to be more accurate because it was - at the time it was created in the 1980s - the only “area-correct map,” though it has since been criticized for skewing both the polar and equatorial continents. The above photo shows just how different the projections are from one another.
- -The problem arises because we’re trying to project a 3D object into 2D space. It’s a classic case of even though we can, maybe we shouldn’t. Computers can do these computations and change the projections to anything we want fairly easily. However, humans think and exist in metaphors. We assume bigger = better and up = good. When we use projections that place the Northern Hemisphere both upward and larger than the rest of the world, we imbue those maps with metaphorical meaning.
- -I caution you to be careful when creating maps. Think through the implications of something as simple as making Alaska more visually appealing by distorting it to be of similar size as the other states.
- -If you want to read more about map projections this is a good post. If you want to read more about metaphors, I suggest Metaphors We Live By by George Lakoff and Mark Johnson.
- -The sf
package includes a function called st_transform()
which will reproject the data for us. There are a lot of projections; you can read about them on the PROJ website.
Leaflet requires all boundaries to use the World Geodetic System 1984 (WGS84) coordinate system. While making maps I’ve come across two main coordinate systems: WGS84 and the North American Datum of 1983 (NAD83). WGS84 uses the WGS84 ellipsoid, and NAD83 uses the Geodetic Reference System 1980 (GRS80) ellipsoid. From what I’ve gathered, the differences are slight, but Leaflet requires WGS84 and the Census uses NAD83. As a result, we have to reproject the data in order to make our map.
- -The st_transform
function takes four arguments, each preceded by a +
. All four arguments are required to transform the data from NAD83 to WGS84.
Briefly, +proj=longlat
tells R to project the data into longitude and latitude [rather than, for example, transverse Mercator (tmerc
)].
+ellps=WGS84
sets the ellipsoid to the WGS84 standard.
+datum=WGS84
is a holdover from previous PROJ releases. It tells R to use the WGS84 datum.
+no_defs
is also a holdover.
Essentially, you need to include line 6 before you create the map, but after you do any data manipulation. It might throw some warnings which you can just ignore.
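As an aside, the same reprojection can be written more compactly with an EPSG code - 4326 is the code for WGS84 - if you find the proj string unwieldy. This is an equivalent alternative, not what the file above uses:

 ## equivalent shorthand: EPSG 4326 is WGS84
 us_states <- sf::st_transform(us_states, crs = 4326)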
- -In the last line, we save the data we manipulated in lines 2-6. Strictly speaking you don’t have to save the shapefile. You can manipulate the data and then skip right to mapping the data. I caution against it because the files can get unreadable once you start using multiple data sets. I usually comment out line 9 after I save the file. That way I’m not saving and re-saving it whenever I need to run the code above it.
- -The st_write()
function is part of the sf
package and it takes two arguments. The first is the data set you want to save. Since I used us_states
to save the data, it will be the first argument in the st_write()
function call.
The second argument is the path to where you want the file saved and what name you want to give it. I named mine usa
. It is mandatory that you add .shp
to the end of the filepath so that R knows to save it as a shapefile.
Although it’s called a shapefile, it’s actually four files (.shp, .shx, .dbf, and .prj). I usually create a separate folder for each set of shapefiles and store that in one master folder called shapefiles. An example of my folder structure is below. I keep all of this in my GitHub repo and track changes using DVC.
- - - -On my C://
drive is My Documents
. In that folder I keep a GitHub
folder that holds all my repos, including my nps
one. Inside the nps
folder I separate my shapefiles into their own folder. For this tutorial I am using original and shifted shapefiles, so I’ve also separated them into two separate folders to keep things neat. I also know I’m going to have multiple shapefiles (one for the USA, one for the National Parks, and a final one for the State Parks) so I created a folder for each set. In the usa
folder I saved the shifted states shapefile.
Altogether, my line 9 would read:
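Assuming the folder structure described above (the user portion of the path is a placeholder - swap in wherever your GitHub folder actually lives), the full call would look something like:

 ## save the shifted shapefile into the shifted/usa folder
 st_write(us_states, "C:/Users/your-username/Documents/GitHub/nps/shapefiles/shifted/usa/usa.shp")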
- - - -Running that line will save the four necessary files that R needs to load the geographic data.
- -That’s it for method 1 using tigris
. The next section, method 2, shows how to load and transform a previously downloaded shapefile. If you used method 1, feel free to leave this post and go directly to mapping the shapefile in part II of this series.
In this section, I’ll go through the process of downloading the shapefiles from the Census website. If you tried method 1 and tigris caused the weird Frankenstate, you can try using the data downloaded from the Census website. I don’t know why it works, since tigris uses the same data, but it does.
- -Generally, though, finding and using shapefiles created by others is a great way to create cool maps. There are thousands of shapefiles available, many from ArcGis’ Open Data Website.
- -Save the file wherever you want, but I prefer to keep it within the “original” shapefiles folder in a sub-folder called “zips.” Once it downloads, unzip it - again, anywhere is fine. It will download all 30 Census shapefiles. We’re only going to use the one called “cb_2021_us_state_500k.zip”. The rest you can delete, if you want.
- - - -When you unzip the cb_2021_us_state_500k.zip, it will contain four files. You’ll only ever work with the .shp
file, but the other three are used in the background to display the data.
Once all the files are unzipped, we can load the .shp
file into R.
1
-2
-3
-4
-5
-6
-7
-8
-9
-
## load a previously downloaded shapefile
- usa <- read_sf("shapefiles/original/usa/states/cb_2021_us_state_500k.shp") %>%
- filter(STATEFP < 57) %>%
- shift_geometry(preserve_area = FALSE,
- position = "below") %>%
- sf::st_transform("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
-
- ## save the shifted shapefile
- st_write(usa, "path/to/file/usa.shp")
-
Everything except line 2 is the same as in method 1. I won’t go over lines 3-9 here, because all the information is above.
- -This line is very similar to the one above. I changed the name of the variable to usa
so I could keep both methods in the same R file (each R variable needs to be unique or it will be overwritten).
read_sf
is part of the sf package. It’s used to load shapefiles into R. The path to the file is enclosed in quotation marks and parentheses. Simply navigate to wherever you unzipped the cb_2021_us_state_500k file and choose the file with the .shp
extension.
Once the shapefiles are downloaded - either using tigris() or by loading the shapefiles from the Census website - you can create the base map. I’ll tackle making the base map in part II of this series.
-:ET \ No newline at end of file diff --git a/.jekyll-cache/Jekyll/Cache/Jekyll--Converters--Markdown/26/69d16ec18aed49e7e7a7bb1d9a330c2e1b48cf94e49f38c1ca4ad936353dc6 b/.jekyll-cache/Jekyll/Cache/Jekyll--Converters--Markdown/26/69d16ec18aed49e7e7a7bb1d9a330c2e1b48cf94e49f38c1ca4ad936353dc6 new file mode 100644 index 0000000..f86bd1b --- /dev/null +++ b/.jekyll-cache/Jekyll/Cache/Jekyll--Converters--Markdown/26/69d16ec18aed49e7e7a7bb1d9a330c2e1b48cf94e49f38c1ca4ad936353dc6 @@ -0,0 +1,185 @@ +I"!Welcome to part four of my cartography in R series. In this post, we’ll download and process the state park data before adding it to the base map created in part II.
+ + + + + + | + + + +read more +This is part three of my cartography in R series. If you are just finding this, I suggest taking a look at part I and part II first.
+ +In this post, I will download and process the National Park data. Once that’s done, I’ll add it to the base map I created in part II.
+ + + + + + | + + + +read more +This is a continuation of my previous post where I walked through how to download and modify shapefile data. I also showed how to shift Alaska and Hawaii so they are closer to the continental usa.
+ + + + + | + + + +read more +Twitter is a great resource for engaging with the academic community. For example, I saw this Tweet by PhD Genie asking users to name one positive skill learned during their PhD. I love this question for a number of reasons. First, it helps PhDs reframe their experience so it’s applicable outside of academia - which can help when applying to jobs. Second, it’s really cool to see what skills other people have learned during their program.
+ + + + + | + + + +read more +Version control is helpful when you want to track your project’s changes. However, GitHub has one major (yet, understandable) shortcoming: file size. The free version of GitHub will warn you if your file is over 50MB and completely reject your push if the file is over 100MB. This is a huge problem when you’re working with shapefiles (.shp) which contain the geographic coordinates necessary for cartography. “Officially” there are three ways around GitHub’s file size limits, but I have a clear favorite.
+ + + + + + | + + + +read more +Building off my post about using Git & GitHub, this post is about using the GitHub website to initialize repos and get URLs from existing repos to clone them.
+ + + + + + | + + + +read more +This is part of my tutorial series on using Git and GitHub. In particular, this guide is about using GitHub’s desktop app for creating and managing repos. You can download the app here..
+ + + + + | + + + +read more +Arguably one of the best things you can do before starting a PhD is invest time in learning how to properly use version control. With version control, you can track, save, and revert changes to any kind of project. There are several options available, but I’m partial to Git & GitHub. Even if you never touch a piece of code, version control is very helpful.
+ + + + + + | + + + +read more ++This area will not be a real blog, in the sense that it will probably not have regular updates. +
+ + + + + | + + + +read more +This is a continuation of my previous post where I walked through how to download and modify shape data. I also showed how to shift Alaska and Hawaii so they are closer to the continental usa. +I"Ś\
This is a continuation of my previous post where I walked through how to download and modify shapefile data. I also showed how to shift Alaska and Hawaii so they are closer to the continental usa.
In this post, I’ll go over how to use Leaflet to map the shapefile we made in the previous post. If you’ve come here from part one of the series, you probably have the libraries and data loaded already. However, if you don’t, be sure to load the libraries and shapefiles before moving to number two.
@@ -167,7 +167,7 @@You can leave the base map like this if you want, but all additional data will be added as a layer on top</i>* of this map which can become distracting very quickly. I prefer to make my base maps as basic and unobtrusive as possible so the data I add on top of the base map is more prominent.
+You can leave the base map like this if you want, but all additional data will be added as a layer on top of this map which can become distracting very quickly. I prefer to make my base maps as basic and unobtrusive as possible so the data I add on top of the base map is more prominent.
This is part three of my cartography in R series. If you are just finding this, I suggest taking a look at part I and part II first.
+I"TćThis is part three of my cartography in R series. If you are just finding this, I suggest taking a look at part I and part II first.
In this post, I will download and process the National Park data. Once that’s done, I’ll add it to the base map I created in part II.
@@ -256,7 +256,7 @@nps <- read_sf("path/to/file.shp")
loads the National Park data set to a variable called nps
using the read_sf()
function that is part of the sf package. You will need to change the file path so it reflects where you saved the data on your hard drive.
The %>%
operator is part of the tidyverse package. It tells R to go to the next line and process the next command. It has to go at the end of a line, rather than the beginning.
The %>%
operator is part of the tidyverse package. It tells R to go to the next line and process the next command. The >%>
has to go at the end of a line, rather than the beginning.
The geometry column is specific to shapefiles and it includes the coordinates of the shape. It will be kept automatically - unless you use the st_drop_geometry()
function. I like to specifically select so I remember it’s there.
mutate()
is part of the tidyverse package and it’s extremely versatile. It is mainly used to create new variables or modify existing ones.
The NPS data set has 23 different types of National Parks listed (you can view all of them by running levels(as.factor(nps$UNIT_TYPE))
). I know that in later posts, I’m going to color code the land by type (blue for rivers, green for national parks, etc) so I wanted to reduce the number of colors I would have to use.
The NPS data set has 23 different types of National Parks listed (you can view all of them by running levels(as.factor(nps$UNIT_TYPE))
). I know that in later posts, I’m going to color code the land by type (blue for rivers, green for national parks, etc) so I wanted to reduce the number of colors I will have to use.
mutate()
’s first argument, type =
creates a new column called type
. R will populate the newly created column with whatever comes after the first (singular) equal =
sign. For example, I can put type = NA
and every row in the column will say NA
.
Line 30 creates the new column, visited
and uses case_when
to look for the names of the parks that I’ve been to. If I have visited them, it adds visited
to the column of the same name.
The last line, TRUE ~ "not_visited))
, acts as an else statement. For any park not listed above, it will put not visited
in the visited
column I created.
The last line, TRUE ~ "not_visited"))
, acts as an else statement. For any park not listed above, it will put not visited
in the visited
column I created.
This feels like a very brute-force method of tracking which parks I’ve visited, but I haven’t spend much time trying to find another way.
@@ -359,7 +359,7 @@In part I, when I made the base map, I moved Alaska and Hawaii so they were of similar size and closer to the continental USA. For the map to display the parks correctly, I have to shift them as well.
-I went over these two lines in part II, so I won’t go over them again here. If you want to read more about them, check out that post.
+I went over these two lines in part II, so I won’t go over them again here. If you want to read more about them, check out that post.
Define the color and transparency of the National Parks. In a future post, I am going to change the color of each type of public land, but for now, I’ll make them all a nice sage green color #354f52
. I also want to make the parks to be fully opaque.
Define the color and transparency of the National Parks. In a future post, I am going to change the color of each type of public land, but for now, I’ll make them all a nice sage green color #354f52
. I also want to make the parks fully opaque.
This is a continuation of my previous post where I walked through how to download and modify shape data. I also showed how to shift Alaska and Hawaii so they are closer to the continental usa.
+This is a continuation of my previous post where I walked through how to download and modify shapefile data. I also showed how to shift Alaska and Hawaii so they are closer to the continental usa.
@@ -164,7 +164,7 @@ This area will not be a real blog, in the sense that it will probably not have rWelcome to part four of my cartography in R series. In this post, we’ll download and process the state park data before adding it to the base map created in part II.
+ + + + + + | + + + +read more +This is part three of my cartography in R series. If you are just finding this, I suggest taking a look at part I and part II first.
+ +In this post, I will download and process the National Park data. Once that’s done, I’ll add it to the base map I created in part II.
+ + + + + + | + + + +read more +This is a continuation of my previous post where I walked through how to download and modify shapefile data. I also showed how to shift Alaska and Hawaii so they are closer to the continental usa.
+ + + + + | + + + +read more +Twitter is a great resource for engaging with the academic community. For example, I saw this Tweet by PhD Genie asking users to name one positive skill learned during their PhD. I love this question for a number of reasons. First, it helps PhDs reframe their experience so it’s applicable outside of academia - which can help when applying to jobs. Second, it’s really cool to see what skills other people have learned during their program.
+ + + + + | + + + +read more +Version control is helpful when you want to track your project’s changes. However, GitHub has one major (yet, understandable) shortcoming: file size. The free version of GitHub will warn you if your file is over 50MB and completely reject your push if the file is over 100MB. This is a huge problem when you’re working with shapefiles (.shp) which contain the geographic coordinates necessary for cartography. “Officially” there are three ways around GitHub’s file size limits, but I have a clear favorite.
+ + + + + + | + + + +read more +Building off my post about using Git & GitHub, this post is about using the GitHub website to initialize repos and get URLs from existing repos to clone them.
+ + + + + + | + + + +read more +This is part of my tutorial series on using Git and GitHub. In particular, this guide is about using GitHub’s desktop app for creating and managing repos. You can download the app here..
+ + + + + | + + + +read more +Arguably one of the best things you can do before starting a PhD is invest time in learning how to properly use version control. With version control, you can track, save, and revert changes to any kind of project. There are several options available, but I’m partial to Git & GitHub. Even if you never touch a piece of code, version control is very helpful.
+ + + + + + | + + + +read more ++This area will not be a real blog, in the sense that it will probably not have regular updates. +
+ + + + + | + + + +read more +Welcome to part four of my cartography in R series. In this post, we’ll download and process the state park data before adding it to the base map created in part II.
+ + + + + + | + + + +read more +This is part three of my cartography in R series. If you are just finding this, I suggest taking a look at part I and part II first.
+ +In this post, I will download and process the National Park data. Once that’s done, I’ll add it to the base map I created in part II.
+ + + + + + | + + + +read more +This is a continuation of my previous post where I walked through how to download and modify shapefile data. I also showed how to shift Alaska and Hawaii so they are closer to the continental usa.
+ + + + + | + + + +read more +Twitter is a great resource for engaging with the academic community. For example, I saw this Tweet by PhD Genie asking users to name one positive skill learned during their PhD. I love this question for a number of reasons. First, it helps PhDs reframe their experience so it’s applicable outside of academia - which can help when applying to jobs. Second, it’s really cool to see what skills other people have learned during their program.
+ + + + + | + + + +read more +Version control is helpful when you want to track your project’s changes. However, GitHub has one major (yet, understandable) shortcoming: file size. The free version of GitHub will warn you if your file is over 50MB and completely reject your push if the file is over 100MB. This is a huge problem when you’re working with shapefiles (.shp) which contain the geographic coordinates necessary for cartography. “Officially” there are three ways around GitHub’s file size limits, but I have a clear favorite.
+ + + + + + | + + + +read more +Building off my post about using Git & GitHub, this post is about using the GitHub website to initialize repos and get URLs from existing repos to clone them.
+ + + + + + | + + + +read more +This is part of my tutorial series on using Git and GitHub. In particular, this guide is about using GitHub’s desktop app for creating and managing repos. You can download the app here..
+ + + + + | + + + +read more +Arguably one of the best things you can do before starting a PhD is invest time in learning how to properly use version control. With version control, you can track, save, and revert changes to any kind of project. There are several options available, but I’m partial to Git & GitHub. Even if you never touch a piece of code, version control is very helpful.
+ + + + + + | + + + +read more ++This area will not be a real blog, in the sense that it will probably not have regular updates. +
+ + + + + | + + + +read more +This is a continuation of my previous post where I walked through how to download and modify shapefile data. I also showed how to shift Alaska and Hawaii so they are closer to the continental usa.
+:ET \ No newline at end of file diff --git a/.jekyll-cache/Jekyll/Cache/Jekyll--Converters--Markdown/c1/a0e6f48d038d15f7f767503790d3930f822981fe4c43979b74c954c21e96c3 b/.jekyll-cache/Jekyll/Cache/Jekyll--Converters--Markdown/c1/a0e6f48d038d15f7f767503790d3930f822981fe4c43979b74c954c21e96c3 new file mode 100644 index 0000000..132821f --- /dev/null +++ b/.jekyll-cache/Jekyll/Cache/Jekyll--Converters--Markdown/c1/a0e6f48d038d15f7f767503790d3930f822981fe4c43979b74c954c21e96c3 @@ -0,0 +1,184 @@ +I"!Welcome to part four of my cartography in R series. In this post, we’ll download and process the state park data before adding it to the base map created in part II.
+ + + + + + | + + + +read more +This is part three of my cartography in R series. If you are just finding this, I suggest taking a look at part I and part II first.
+ +In this post, I will download and process the National Park data. Once that’s done, I’ll add it to the base map I created in part II.
+ + + + + + | + + + +read more +This is a continuation of my previous post where I walked through how to download and modify shapefile data. I also showed how to shift Alaska and Hawaii so they are closer to the continental usa.
+ + + + + | + + + +read more +Twitter is a great resource for engaging with the academic community. For example, I saw this Tweet by PhD Genie asking users to name one positive skill learned during their PhD. I love this question for a number of reasons. First, it helps PhDs reframe their experience so it’s applicable outside of academia - which can help when applying to jobs. Second, it’s really cool to see what skills other people have learned during their program.
+ + + + + | + + + +read more +Version control is helpful when you want to track your project’s changes. However, GitHub has one major (yet, understandable) shortcoming: file size. The free version of GitHub will warn you if your file is over 50MB and completely reject your push if the file is over 100MB. This is a huge problem when you’re working with shapefiles (.shp) which contain the geographic coordinates necessary for cartography. “Officially” there are three ways around GitHub’s file size limits, but I have a clear favorite.
+ + + + + + | + + + +read more +Building off my post about using Git & GitHub, this post is about using the GitHub website to initialize repos and get URLs from existing repos to clone them.
+ + + + + + | + + + +read more +This is part of my tutorial series on using Git and GitHub. In particular, this guide is about using GitHub’s desktop app for creating and managing repos. You can download the app here..
+ + + + + | + + + +read more +Arguably one of the best things you can do before starting a PhD is invest time in learning how to properly use version control. With version control, you can track, save, and revert changes to any kind of project. There are several options available, but I’m partial to Git & GitHub. Even if you never touch a piece of code, version control is very helpful.
+ + + + + + | + + + +read more ++This area will not be a real blog, in the sense that it will probably not have regular updates. +
+ + + + + | + + + +read more +This is a continuation of my previous post where I walked through how to download and modify shape data. I also showed how to shift Alaska and Hawaii so they are closer to the continental usa.
-:ET \ No newline at end of file diff --git a/_layouts/post.html b/_layouts/post.html index b7fb7f1..075055c 100644 --- a/_layouts/post.html +++ b/_layouts/post.html @@ -5,10 +5,10 @@