% references.bib — climate-science bibliography (BibTeX database, Zotero-style export).
% NOTE: GitHub page chrome and the line-number gutter from the web scrape were removed
% here; text outside @entries is ignored by BibTeX, but the residue was noise. Entries begin below.
% Kellogg & Schneider (1974), Science 186(4170) — early assessment of deliberate
% climate-modification ("stabilization") proposals.
% NOTE(review): the two-line "note" field is Zotero export residue
% ("Publisher: ... Section: ...") — most styles print it verbatim; consider pruning.
% NOTE(review): author names are initials-only; full given names preferred if known.
@article{kellogg_climate_1974,
title = {Climate {Stabilization}: {For} {Better} or for {Worse}?},
volume = {186},
copyright = {1974 by the American Association for the Advancement of Science},
issn = {0036-8075, 1095-9203},
shorttitle = {Climate {Stabilization}},
url = {https://science.sciencemag.org/content/186/4170/1163},
doi = {10.1126/science.186.4170.1163},
language = {en},
number = {4170},
urldate = {2020-03-20},
journal = {Science},
author = {Kellogg, W. W. and Schneider, S. H.},
month = dec,
year = {1974},
pmid = {17833918},
note = {Publisher: American Association for the Advancement of Science
Section: Articles},
pages = {1163--1172}
}
% Schneider (1975), J. Atmos. Sci. 32(11) — review of early CO2-doubling
% climate-sensitivity estimates (1.5–3 K).
% NOTE(review): the angle brackets in the doi are part of the genuine legacy AMS
% DOI and must stay; the percent-encoded url duplicates the doi and could be
% dropped in favour of the doi field.
@article{schneider_carbon_1975,
title = {On the {Carbon} {Dioxide}–{Climate} {Confusion}},
volume = {32},
issn = {0022-4928},
url = {https://journals.ametsoc.org/doi/abs/10.1175/1520-0469%281975%29032%3C2060%3AOTCDC%3E2.0.CO%3B2},
doi = {10.1175/1520-0469(1975)032<2060:OTCDC>2.0.CO;2},
abstract = {A number of estimates of global surface temperature sensitivity to a doubling of atmospheric carbon dioxide to 600 ppm are collected here and critically reviewed. The assumptions and formulations that lead to differences between certain models' estimates are explained in some detail. Based on current understanding of climate theory and modeling it is concluded that a state-of-the-art order-of-magnitude estimate for the global surface temperature increase from a doubling of atmospheric C02 content is between 1.5 and 3 K with an amplification of the global average increase in polar zones. It is pointed out, however, that this estimate may prove to be high or low by several-fold as a result of climatic feedback mechanisms not properly accounted for in state-of-the-art models.},
number = {11},
urldate = {2020-03-20},
journal = {Journal of the Atmospheric Sciences},
author = {Schneider, Stephen H.},
month = nov,
year = {1975},
note = {Publisher: American Meteorological Society},
pages = {2060--2066}
}
% Flegal et al. (2019), Annu. Rev. Environ. Resour. 44 — review of social-science,
% legal, ethical, and economic aspects of solar geoengineering.
% NOTE(review): the note field ("\_eprint: ...") is export residue duplicating the
% doi — consider removing.
@article{flegal_solar_2019,
title = {Solar {Geoengineering}: {Social} {Science}, {Legal}, {Ethical}, and {Economic} {Frameworks}},
volume = {44},
shorttitle = {Solar {Geoengineering}},
url = {https://doi.org/10.1146/annurev-environ-102017-030032},
doi = {10.1146/annurev-environ-102017-030032},
abstract = {Solar geoengineering research in the social sciences and humanities has largely evolved in parallel with research in the natural sciences. In this article, we review the current state of the literature on the ethical, legal, economic, and social science aspects of this emerging area. We discuss issues regarding the framing and futures of solar geoengineering, empirical social science on public views and public engagement, the evolution of ethical concerns regarding research and deployment, and the current legal and economic frameworks and emerging proposals for the regulation and governance of solar geoengineering.},
number = {1},
urldate = {2020-03-20},
journal = {Annual Review of Environment and Resources},
author = {Flegal, Jane A. and Hubert, Anna-Maria and Morrow, David R. and Moreno-Cruz, Juan B.},
year = {2019},
note = {\_eprint: https://doi.org/10.1146/annurev-environ-102017-030032},
pages = {399--423}
}
% Sherwood & Huber (2010), PNAS 107(21) — wet-bulb-temperature limit to human
% adaptability under extreme warming.
% NOTE(review): no month field (PNAS issue 21 implies one); note field carries
% Zotero "Publisher:/\_eprint:" residue spread over two lines.
@article{sherwood_adaptability_2010,
title = {An adaptability limit to climate change due to heat stress},
volume = {107},
issn = {0027-8424},
url = {https://www.pnas.org/content/107/21/9552},
doi = {10.1073/pnas.0913352107},
abstract = {Despite the uncertainty in future climate-change impacts, it is often assumed that humans would be able to adapt to any possible warming. Here we argue that heat stress imposes a robust upper limit to such adaptation. Peak heat stress, quantified by the wet-bulb temperature TW, is surprisingly similar across diverse climates today. TW never exceeds 31 °C. Any exceedence of 35 °C for extended periods should induce hyperthermia in humans and other mammals, as dissipation of metabolic heat becomes impossible. While this never happens now, it would begin to occur with global-mean warming of about 7 °C, calling the habitability of some regions into question. With 11–12 °C warming, such regions would spread to encompass the majority of the human population as currently distributed. Eventual warmings of 12 °C are possible from fossil fuel burning. One implication is that recent estimates of the costs of unmitigated climate change are too low unless the range of possible warming can somehow be narrowed. Heat stress also may help explain trends in the mammalian fossil record.},
number = {21},
journal = {Proceedings of the National Academy of Sciences},
author = {Sherwood, Steven C. and Huber, Matthew},
year = {2010},
note = {Publisher: National Academy of Sciences
\_eprint: https://www.pnas.org/content/107/21/9552.full.pdf},
pages = {9552--9555}
}
% McMichael & Dear (2010), PNAS 107(21) — commentary on Sherwood & Huber (2010)
% (see entry sherwood_adaptability_2010 above in this file).
% NOTE(review): the abstract is a pasted commentary excerpt spanning multiple
% physical lines and ends truncated ("...never exceed 31 …"); left byte-identical
% because any reflow risks corrupting the braced field.
@article{mcmichael_climate_2010,
title = {Climate change: {Heat}, health, and longer horizons},
volume = {107},
issn = {0027-8424, 1091-6490},
shorttitle = {Climate change},
url = {https://www.pnas.org/content/107/21/9483},
doi = {10.1073/pnas.1004894107},
abstract = {Public concern over climate-change impacts has mostly focused on the economic, physical, and political domains. The consequences for various industries, agriculture, livelihoods, national gross domestic product, property, infrastructure, and electoral prospects have captured most attention. In this issue of PNAS, Sherwood and Huber (1) apply a longer than usual perspective on climate change and conclude that, because of limits to human tolerance of heat, much of Earth’s surface may not be habitable by 2300. Their important, related, and overarching statement is that “current assessments are underestimating the seriousness of climate change” (1). They argue that, whereas high-profile threats such as sea-level rise and economic slowdown have caused widespread anxieties, their impacts on human communities would pale into insignificance in a world that might, thermally, become partly or wholly uninhabitable by humans.
This chord needs to be struck. The world’s human population is playing for higher stakes than have generally been recognized. Global climate change (along with today’s other human-induced, large-scale systemic environmental changes) poses great risks to the planet’s existing life-support systems and conditions. Nearly all of the adverse consequences of climate change—reduced regional food yields, freshwater shortages, increased frequency of extreme weather events, coastal population displacement, changes in the ecology and geography of infectious agents, declines in farming community incomes, and biodiversity losses with accompanying disruption of ecosystem functions—will converge adversely on human biology and health. Climate change, ultimately, is a threat to our biological health and survival (2).
There are four main threads to the authors’ argument about future heat extremes. ( i ) When the modifying effect of humidity on perceived (i.e., physiologically experienced) heat is allowed, the present range of extreme climatic conditions around the globe is actually rather limited: the hottest places tend to be dry, so that wet-bulb temperatures (TW) essentially never exceed 31 …},
language = {en},
number = {21},
urldate = {2020-03-19},
journal = {Proceedings of the National Academy of Sciences},
author = {McMichael, Anthony J. and Dear, Keith B. G.},
month = may,
year = {2010},
pmid = {20483994},
note = {Publisher: National Academy of Sciences
Section: Commentary},
pages = {9483--9484}
}
% UNEP Adaptation Gap Report 2018.
% Fixes: the corporate author was exported as a personal name ("Environment, U. N."),
% which BibTeX would render as "U. N. Environment" and sort under "E" — now braced as
% a single literal corporate author. The website name sat in "journal" (not a valid
% field for @misc); moved to "howpublished". Zotero "Library Catalog/Section" note
% residue removed.
@misc{environment_adaptation_2018,
title = {Adaptation {Gap} {Report}},
url = {http://www.unenvironment.org/resources/adaptation-gap-report},
abstract = {This is the fourth edition of the UN Environment Adaptation Gap Reports. Since 2014, these reports have focused on exploring adaptation gaps, characterized as the difference between the actual level of adaptation and the level required to achieve a societal goal. The adoption of the Paris Agreement established a global goal on adaptation of “enhancing adaptive capacity, strengthening resilience and reducing vulnerability to climate change, with a view to contributing to sustainable development and ensuring an adequate adaptation response in the context of the temperature goal”. As the Paris Agreement is now being implemented, important decisions are about to be made on how to report on, and take stock of, progress towards this global goal. The Adaptation Gap Reports focus on providing policy-relevant information to support such efforts.
The focus of the 2018 report is dual: The first part examines the gaps that exist in a number of areas that are central to taking stock and assessing progress on adaptation, namely the enabling environment as expressed through laws and policies, key development aspects of adaptive capacity, and the costs of and finance needed for adaptation. The second part of the report focuses on the adaptation gap in one particular sector, namely health. Based on the available scientific evidence on climate impacts and health outcomes, the second part provides an overview of the global adaptation gap in health, followed by a specific focus on three key areas of climate-related health risks: heat and extreme events, climate-sensitive infectious diseases, and food and nutritional security},
language = {en},
urldate = {2020-03-19},
howpublished = {UNEP - UN Environment Programme},
author = {{United Nations Environment Programme}},
month = dec,
year = {2018}
}
% Geoffroy et al. (2012), J. Climate 26(6) — two-layer energy-balance model,
% Part I: analytical solution and calibration against CMIP5 AOGCMs.
% NOTE(review): "Publisher: ..." note field is export residue; otherwise clean.
@article{geoffroy_transient_2012,
title = {Transient {Climate} {Response} in a {Two}-{Layer} {Energy}-{Balance} {Model}. {Part} {I}: {Analytical} {Solution} and {Parameter} {Calibration} {Using} {CMIP5} {AOGCM} {Experiments}},
volume = {26},
issn = {0894-8755},
shorttitle = {Transient {Climate} {Response} in a {Two}-{Layer} {Energy}-{Balance} {Model}. {Part} {I}},
url = {https://journals.ametsoc.org/doi/full/10.1175/JCLI-D-12-00195.1},
doi = {10.1175/JCLI-D-12-00195.1},
abstract = {This is the first part of a series of two articles analyzing the global thermal properties of atmosphere–ocean coupled general circulation models (AOGCMs) within the framework of a two-layer energy-balance model (EBM). In this part, the general analytical solution of the system is given and two idealized climate change scenarios, one with a step forcing and one with a linear forcing, are discussed. These solutions give a didactic description of the contributions from the equilibrium response and of the fast and slow transient responses during a climate transition. Based on these analytical solutions, a simple and physically based procedure to calibrate the two-layer model parameters using an AOGCM step-forcing experiment is introduced. Using this procedure, the global thermal properties of 16 AOGCMs participating in phase 5 of the Coupled Model Intercomparison Project (CMIP5) are determined. It is shown that, for a given AOGCM, the EBM tuned with only the abrupt 4×CO2 experiment is able to reproduce with a very good accuracy the temperature evolution in both a step-forcing and a linear-forcing experiment. The role of the upper-ocean and deep-ocean heat uptakes in the fast and slow responses is also discussed. One of the main weaknesses of the simple EBM discussed in this part is its ability to represent the evolution of the top-of-the-atmosphere radiative imbalance in the transient regime. This issue is addressed in Part II by taking into account the efficacy factor of deep-ocean heat uptake.},
number = {6},
urldate = {2020-03-19},
journal = {Journal of Climate},
author = {Geoffroy, O. and Saint-Martin, D. and Olivié, D. J. L. and Voldoire, A. and Bellon, G. and Tytéca, S.},
month = sep,
year = {2012},
note = {Publisher: American Meteorological Society},
pages = {1841--1857}
}
% Lenssen et al. (2019), JGR Atmospheres 124(12) — GISTEMP v4 uncertainty model.
% NOTE(review): no month field; note field ("\_eprint: ...") is export residue.
% Non-ASCII characters (©, °) require a Unicode-aware toolchain (biblatex/Biber);
% classic 8-bit BibTeX may mis-sort them.
@article{lenssen_improvements_2019,
title = {Improvements in the {GISTEMP} {Uncertainty} {Model}},
volume = {124},
copyright = {©2019. American Geophysical Union. All Rights Reserved.},
issn = {2169-8996},
url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2018JD029522},
doi = {10.1029/2018JD029522},
abstract = {We outline a new and improved uncertainty analysis for the Goddard Institute for Space Studies Surface Temperature product version 4 (GISTEMP v4). Historical spatial variations in surface temperature anomalies are derived from historical weather station data and ocean data from ships, buoys, and other sensors. Uncertainties arise from measurement uncertainty, changes in spatial coverage of the station record, and systematic biases due to technology shifts and land cover changes. Previously published uncertainty estimates for GISTEMP included only the effect of incomplete station coverage. Here, we update this term using currently available spatial distributions of source data, state-of-the-art reanalyses, and incorporate independently derived estimates for ocean data processing, station homogenization, and other structural biases. The resulting 95\% uncertainties are near 0.05 °C in the global annual mean for the last 50 years and increase going back further in time reaching 0.15 °C in 1880. In addition, we quantify the benefits and inherent uncertainty due to the GISTEMP interpolation and averaging method. We use the total uncertainties to estimate the probability for each record year in the GISTEMP to actually be the true record year (to that date) and conclude with 87\% likelihood that 2016 was indeed the hottest year of the instrumental period (so far).},
language = {en},
number = {12},
urldate = {2020-03-19},
journal = {Journal of Geophysical Research: Atmospheres},
author = {Lenssen, Nathan J. L. and Schmidt, Gavin A. and Hansen, James E. and Menne, Matthew J. and Persin, Avraham and Ruedy, Reto and Zyss, Daniel},
year = {2019},
note = {\_eprint: https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2018JD029522},
pages = {6307--6326}
}
% Auto-captured Elsevier "Enhanced Reader" stub — the title is the article's PII,
% not its real title, and no authors were captured.
% Fixes: stripped the ephemeral session token from the URL (the tokenised reader
% link expires) and pointed at the stable ScienceDirect article page; replaced the
% "Library Catalog" export residue with an actionable review note. The DOI is kept
% as the one reliable identifier.
@misc{noauthor_pii_nodate,
title = {{PII}: {S0305}-{750X}(00)00071-1},
shorttitle = {{PII}},
url = {https://www.sciencedirect.com/science/article/pii/S0305750X00000711},
language = {en},
urldate = {2020-03-14},
doi = {10.1016/S0305-750X(00)00071-1},
internal-note = {TODO(review): resolve the DOI to recover the real title/authors/journal (presumably a World Development article, given the 0305-750X ISSN in the PII -- confirm), then replace or merge this stub.}
}
% Broome (1994), Philosophy & Public Affairs 23(2) — discounting future
% well-being. Canonical record for this work; see also the duplicate stub
% keyed noauthor_discounting_nodate elsewhere in this file.
@article{broome_discounting_1994,
  author  = {Broome, John},
  title   = {Discounting the {Future}},
  journal = {Philosophy \& Public Affairs},
  year    = {1994},
  volume  = {23},
  number  = {2},
  pages   = {128--156},
  issn    = {0048-3915},
  url     = {https://www.jstor.org/stable/2265483},
  urldate = {2020-03-14},
  note    = {Publisher: Wiley},
}
% UNFCCC Paris Agreement landing page (web resource; no listed author or date).
@misc{noauthor_paris_nodate,
  title   = {The {Paris} {Agreement} {\textbar} {UNFCCC}},
  url     = {https://unfccc.int/process-and-meetings/the-paris-agreement/the-paris-agreement},
  urldate = {2020-03-14},
}
% Stern Review (2007), Cambridge University Press.
% Fixes: the author field listed the same person twice ("Stern, Nicholas and
% Stern, Nicholas Herbert") — collapsed to the single full-name form; the
% corporate contributor "Treasury, Great Britain" was parsed as a personal name
% (first = "Great Britain") — now braced as a literal corporate author.
% Added the publisher's address.
% NOTE(review): month = jan looks like export guesswork — confirm or drop.
@book{stern_economics_2007,
title = {The {Economics} of {Climate} {Change}: {The} {Stern} {Review}},
isbn = {978-0-521-70080-1},
shorttitle = {The {Economics} of {Climate} {Change}},
abstract = {There is now clear scientific evidence that emissions from economic activity, particularly the burning of fossil fuels for energy, are causing changes to the Earth's climate. A sound understanding of the economics of climate change is needed in order to underpin an effective global response to this challenge. The Stern Review is an independent, rigourous and comprehensive analysis of the economic aspects of this crucial issue. It has been conducted by Sir Nicholas Stern, Head of the UK Government Economic Service, and a former Chief Economist of the World Bank. The Economics of Climate Change will be invaluable for all students of the economics and policy implications of climate change, and economists, scientists and policy makers involved in all aspects of climate change.},
language = {en},
publisher = {Cambridge University Press},
address = {Cambridge},
author = {Stern, Nicholas Herbert and {Great Britain Treasury}},
month = jan,
year = {2007},
keywords = {Business \& Economics / Economics / General, Science / Earth Sciences / Meteorology \& Climatology, Science / Environmental Science, Science / Global Warming \& Climate Change}
}
% Fuss et al. (2018), Environ. Res. Lett. 13(6) — negative emissions Part 2:
% costs, potentials, side effects. Companion to minx_negative_2018 (Part 1).
% Fix: the url was a percent-encoded DOI-resolver link ("...%2F...") that
% duplicates the doi field and renders badly — replaced with the canonical
% resolver form. pages = {063002} is the ERL article number (intentional).
@article{fuss_negative_2018,
title = {Negative emissions—{Part} 2: {Costs}, potentials and side effects},
volume = {13},
issn = {1748-9326},
shorttitle = {Negative emissions—{Part} 2},
url = {https://doi.org/10.1088/1748-9326/aabf9f},
doi = {10.1088/1748-9326/aabf9f},
abstract = {The most recent IPCC assessment has shown an important role for negative emissions technologies (NETs) in limiting global warming to 2 °C cost-effectively. However, a bottom-up, systematic, reproducible, and transparent literature assessment of the different options to remove CO2 from the atmosphere is currently missing. In part 1 of this three-part review on NETs, we assemble a comprehensive set of the relevant literature so far published, focusing on seven technologies: bioenergy with carbon capture and storage (BECCS), afforestation and reforestation, direct air carbon capture and storage (DACCS), enhanced weathering, ocean fertilisation, biochar, and soil carbon sequestration. In this part, part 2 of the review, we present estimates of costs, potentials, and side-effects for these technologies, and qualify them with the authors’ assessment. Part 3 reviews the innovation and scaling challenges that must be addressed to realise NETs deployment as a viable climate mitigation strategy. Based on a systematic review of the literature, our best estimates for sustainable global NET potentials in 2050 are 0.5–3.6 GtCO2 yr−1 for afforestation and reforestation, 0.5–5 GtCO2 yr−1 for BECCS, 0.5–2 GtCO2 yr−1 for biochar, 2–4 GtCO2 yr−1 for enhanced weathering, 0.5–5 GtCO2 yr−1 for DACCS, and up to 5 GtCO2 yr−1 for soil carbon sequestration. Costs vary widely across the technologies, as do their permanency and cumulative potentials beyond 2050. It is unlikely that a single NET will be able to sustainably meet the rates of carbon uptake described in integrated assessment pathways consistent with 1.5 °C of global warming.},
language = {en},
number = {6},
urldate = {2020-03-14},
journal = {Environmental Research Letters},
author = {Fuss, Sabine and Lamb, William F. and Callaghan, Max W. and Hilaire, Jérôme and Creutzig, Felix and Amann, Thorben and Beringer, Tim and Garcia, Wagner de Oliveira and Hartmann, Jens and Khanna, Tarun and Luderer, Gunnar and Nemet, Gregory F. and Rogelj, Joeri and Smith, Pete and Vicente, José Luis Vicente and Wilcox, Jennifer and Dominguez, Maria del Mar Zamora and Minx, Jan C.},
month = may,
year = {2018},
note = {Publisher: IOP Publishing},
pages = {063002}
}
% Minx et al. (2018), Environ. Res. Lett. 13(6) — negative emissions Part 1:
% research landscape and synthesis. Companion to fuss_negative_2018 (Part 2).
% Fix: same url defect as the Part 2 entry — percent-encoded DOI-resolver link
% replaced with the canonical resolver form. pages = {063001} is the ERL article
% number (intentional).
@article{minx_negative_2018,
title = {Negative emissions—{Part} 1: {Research} landscape and synthesis},
volume = {13},
issn = {1748-9326},
shorttitle = {Negative emissions—{Part} 1},
url = {https://doi.org/10.1088/1748-9326/aabf9b},
doi = {10.1088/1748-9326/aabf9b},
abstract = {With the Paris Agreement’s ambition of limiting climate change to well below 2 °C, negative emission technologies (NETs) have moved into the limelight of discussions in climate science and policy. Despite several assessments, the current knowledge on NETs is still diffuse and incomplete, but also growing fast. Here, we synthesize a comprehensive body of NETs literature, using scientometric tools and performing an in-depth assessment of the quantitative and qualitative evidence therein. We clarify the role of NETs in climate change mitigation scenarios, their ethical implications, as well as the challenges involved in bringing the various NETs to the market and scaling them up in time. There are six major findings arising from our assessment: first, keeping warming below 1.5 °C requires the large-scale deployment of NETs, but this dependency can still be kept to a minimum for the 2 °C warming limit. Second, accounting for economic and biophysical limits, we identify relevant potentials for all NETs except ocean fertilization. Third, any single NET is unlikely to sustainably achieve the large NETs deployment observed in many 1.5 °C and 2 °C mitigation scenarios. Yet, portfolios of multiple NETs, each deployed at modest scales, could be invaluable for reaching the climate goals. Fourth, a substantial gap exists between the upscaling and rapid diffusion of NETs implied in scenarios and progress in actual innovation and deployment. If NETs are required at the scales currently discussed, the resulting urgency of implementation is currently neither reflected in science nor policy. Fifth, NETs face severe barriers to implementation and are only weakly incentivized so far. Finally, we identify distinct ethical discourses relevant for NETs, but highlight the need to root them firmly in the available evidence in order to render such discussions relevant in practice.},
language = {en},
number = {6},
urldate = {2020-03-14},
journal = {Environmental Research Letters},
author = {Minx, Jan C. and Lamb, William F. and Callaghan, Max W. and Fuss, Sabine and Hilaire, Jérôme and Creutzig, Felix and Amann, Thorben and Beringer, Tim and Garcia, Wagner de Oliveira and Hartmann, Jens and Khanna, Tarun and Lenzi, Dominic and Luderer, Gunnar and Nemet, Gregory F. and Rogelj, Joeri and Smith, Pete and Vicente, Jose Luis Vicente and Wilcox, Jennifer and Dominguez, Maria del Mar Zamora},
month = may,
year = {2018},
note = {Publisher: IOP Publishing},
pages = {063001}
}
% Duplicate capture of Broome (1994) "Discounting the Future" — same JSTOR
% stable ID (2265483) as the proper @article entry broome_discounting_1994.
% Fixes: dropped the MIT library-proxy "Library Catalog" residue and the "on
% JSTOR" page-title suffix; kept the key so any existing \cite of it still
% resolves, with a review note to repoint citations and delete.
@misc{noauthor_discounting_nodate,
title = {Discounting the {Future}},
url = {http://www.jstor.org/stable/2265483},
abstract = {John Broome, Discounting the Future, Philosophy \& Public Affairs, Vol. 23, No. 2 (Spring, 1994), pp. 128-156},
language = {en},
urldate = {2020-03-14},
internal-note = {TODO(review): duplicate of broome_discounting_1994 -- repoint any citations to that key and remove this entry.}
}
% Newell & Pizer (2003), JEEM 46(1) — declining certainty-equivalent discount
% rates under discount-rate uncertainty; clean, well-formed entry.
@article{newell_discounting_2003,
title = {Discounting the distant future: how much do uncertain rates increase valuations?},
volume = {46},
issn = {0095-0696},
shorttitle = {Discounting the distant future},
url = {http://www.sciencedirect.com/science/article/pii/S0095069602000311},
doi = {10.1016/S0095-0696(02)00031-1},
abstract = {We demonstrate that when the future path of the discount rate is uncertain and highly correlated, the distant future should be discounted at significantly lower rates than suggested by the current rate. We then use two centuries of US interest rate data to quantify this effect. Using both random walk and mean-reverting models, we compute the “certainty-equivalent rate” that summarizes the effect of uncertainty and measures the appropriate forward rate of discount in the future. Under the random walk model we find that the certainty-equivalent rate falls continuously from 4\% to 2\% after 100 years, 1\% after 200 years, and 0.5\% after 300 years. At horizons of 400 years, the discounted value increases by a factor of over 40,000 relative to conventional discounting. Applied to climate change mitigation, we find that incorporating discount rate uncertainty almost doubles the expected present value of mitigation benefits.},
language = {en},
number = {1},
urldate = {2020-03-14},
journal = {Journal of Environmental Economics and Management},
author = {Newell, Richard G. and Pizer, William A.},
month = jul,
year = {2003},
keywords = {Climate policy, Discounting, Interest rate forecasting, Intergenerational equity, Uncertainty},
pages = {52--71}
}
% Hammitt, Lempert & Schlesinger (1992), Nature 357(6376) — sequential-decision
% strategy for climate abatement. Canonical record; an identical duplicate exists
% below under key hammitt_sequential-decision_1992-1.
@article{hammitt_sequential-decision_1992,
title = {A sequential-decision strategy for abating climate change},
volume = {357},
copyright = {1992 Nature Publishing Group},
issn = {1476-4687},
url = {https://www.nature.com/articles/357315a0},
doi = {10.1038/357315a0},
abstract = {CURRENT debate on policies for limiting climate change due to greenhouse-gas emissions focuses on whether to take action now or later, and on how stringent any emissions reductions should be in the near and long term. Any reductions policies implemented now will need to be revised later as scientific understanding of climate change improves. Here we consider the effects of a sequential-decision strategy (Fig. 1) consisting of a near-term period (1992–2002) during which either moderate emissions reductions (achieved by energy conservation only) or aggressive reductions (energy conservation coupled with switching to other fuel sources) are begun, and a subsequent long-term period during which a least-cost abatement policy is followed to limit global mean temperature change to an optimal target ΔT*. For each policy we calculate the global mean surface temperature change ΔT(t) using a simple climate/ocean model for climate sensitivities ΔT2x. (the response to doubled CO2, concentrations) of 4.5,2.5,1.5 and 0.5 °C. The policy beginning with moderate reductions is less expensive than that with aggressive reductions if ΔT*{\textgreater}2.9, 2.1, 1.5 and 0.9 °C respectively; otherwise, the aggressive-reductions policy is cheaper. We suggest that this approach should assist in choosing realistic targets and in determining how best to implement emissions reductions in the short and long term.},
language = {en},
number = {6376},
urldate = {2020-03-13},
journal = {Nature},
author = {Hammitt, James K. and Lempert, Robert J. and Schlesinger, Michael E.},
month = may,
year = {1992},
note = {Number: 6376
Publisher: Nature Publishing Group},
pages = {315--318}
}
% Hammitt & Adams (1996), Resource and Energy Economics 18(3) — game-theoretic
% value of international cooperation on climate abatement.
% Fix: author initials lacked periods ("James K", "John L"), inconsistent with
% "Hammitt, James K." in hammitt_sequential-decision_1992 — normalised so styles
% abbreviate and sort the same person identically across entries.
@article{hammitt_value_1996,
title = {The value of international cooperation for abating global climate change},
volume = {18},
issn = {0928-7655},
url = {http://www.sciencedirect.com/science/article/pii/S0928765596000085},
doi = {10.1016/S0928-7655(96)00008-5},
abstract = {Because abatement of global climate change is a public good, independent national actions may not produce the efficient quantity. Using a numerical integrated-assessment model, abatement costs and damages induced by climate change are compared at the cooperative and noncooperative solutions to a set of two-party dynamic games between the industrialized and developing countries. Games with perfect and imperfect information about climate and economic factors are considered. Across 144 games with perfect information, incorporating different values of climate and economic parameters, the noncooperative solution usually yields global benefits comparable to those of the cooperative solution. In about one-fifth of these games, however, a second noncooperative solution exists which yields none of the benefits of the cooperative solution. In a game with imperfect information, where the state of nature is uncertain in the first but known in the second of two periods, the expected benefits of the noncooperative solution are 98\% of the expected benefits of the cooperative solution. In contrast to single-agent studies which show little cost to delaying abatement, the benefits of cooperation are usually lost if cooperation is delayed 20 years.},
language = {en},
number = {3},
urldate = {2020-03-13},
journal = {Resource and Energy Economics},
author = {Hammitt, James K. and Adams, John L.},
month = oct,
year = {1996},
keywords = {Climate change, International agreements, Noncooperative games},
pages = {219--241}
}
@article{hammitt_sequential-decision_1992-1,
title = {A sequential-decision strategy for abating climate change},
volume = {357},
copyright = {1992 Nature Publishing Group},
issn = {1476-4687},
url = {https://www.nature.com/articles/357315a0},
doi = {10.1038/357315a0},
abstract = {CURRENT debate on policies for limiting climate change due to greenhouse-gas emissions focuses on whether to take action now or later, and on how stringent any emissions reductions should be in the near and long term. Any reductions policies implemented now will need to be revised later as scientific understanding of climate change improves. Here we consider the effects of a sequential-decision strategy (Fig. 1) consisting of a near-term period (1992–2002) during which either moderate emissions reductions (achieved by energy conservation only) or aggressive reductions (energy conservation coupled with switching to other fuel sources) are begun, and a subsequent long-term period during which a least-cost abatement policy is followed to limit global mean temperature change to an optimal target ΔT*. For each policy we calculate the global mean surface temperature change ΔT(t) using a simple climate/ocean model for climate sensitivities ΔT2x. (the response to doubled CO2, concentrations) of 4.5,2.5,1.5 and 0.5 °C. The policy beginning with moderate reductions is less expensive than that with aggressive reductions if ΔT*{\textgreater}2.9, 2.1, 1.5 and 0.9 °C respectively; otherwise, the aggressive-reductions policy is cheaper. We suggest that this approach should assist in choosing realistic targets and in determining how best to implement emissions reductions in the short and long term.},
language = {en},
number = {6376},
urldate = {2020-03-13},
journal = {Nature},
author = {Hammitt, James K. and Lempert, Robert J. and Schlesinger, Michael E.},
month = may,
year = {1992},
note = {Number: 6376
Publisher: Nature Publishing Group},
pages = {315--318}
}
@article{peck_uncertainty_1996,
title = {Uncertainty and the {Value} of {Information} with {Stochastic} {Losses} from {Global} {Warming}},
volume = {16},
issn = {1539-6924},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1539-6924.1996.tb01453.x},
doi = {10.1111/j.1539-6924.1996.tb01453.x},
abstract = {We present an uncertainty analysis conducted using CETA-R, a model in which the costs of climate change are specified as Risks of large losses. In this analysis, we assume that three key parameters may each take on “high” or “low” values, leading to eight possible states of the world. We then explore optimal policies when the state of the world is known, and under uncertainty. Also, we estimate the benefits of resolving uncertainty earlier. We find that the optimal policy under uncertainty is similar to the policy that is optimal when each of the key parameters is at its low value. We also find that the value of immediate uncertainty resolution rises sharply as the alternative to immediate resolution is increasingly delayed resolution.},
language = {en},
number = {2},
urldate = {2020-03-13},
journal = {Risk Analysis},
author = {Peck, Stephen C. and Teisberg, Thomas J.},
year = {1996},
note = {\_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1111/j.1539-6924.1996.tb01453.x},
keywords = {Global warming, decision-making under uncertainty, integrated assessment, policy analysis, value of information},
pages = {227--235}
}
@article{arrow_determining_2013,
title = {Determining {Benefits} and {Costs} for {Future} {Generations}},
volume = {341},
copyright = {Copyright © 2013, American Association for the Advancement of Science},
issn = {0036-8075, 1095-9203},
url = {https://science.sciencemag.org/content/341/6144/349},
doi = {10.1126/science.1235665},
abstract = {The United States and others should consider adopting a different approach to estimating costs and benefits in light of uncertainty.},
language = {en},
number = {6144},
urldate = {2020-03-13},
journal = {Science},
author = {Arrow, K. and Cropper, M. and Gollier, C. and Groom, B. and Heal, G. and Newell, R. and Nordhaus, W. and Pindyck, R. and Pizer, W. and Portney, P. and Sterner, T. and Tol, R. S. J. and Weitzman, M.},
month = jul,
year = {2013},
pmid = {23888025},
note = {Publisher: American Association for the Advancement of Science
Section: Policy Forum},
pages = {349--350}
}
@article{newell_discounting_2003-1,
title = {Discounting the distant future: how much do uncertain rates increase valuations?},
volume = {46},
issn = {0095-0696},
shorttitle = {Discounting the distant future},
url = {http://www.sciencedirect.com/science/article/pii/S0095069602000311},
doi = {10.1016/S0095-0696(02)00031-1},
abstract = {We demonstrate that when the future path of the discount rate is uncertain and highly correlated, the distant future should be discounted at significantly lower rates than suggested by the current rate. We then use two centuries of US interest rate data to quantify this effect. Using both random walk and mean-reverting models, we compute the “certainty-equivalent rate” that summarizes the effect of uncertainty and measures the appropriate forward rate of discount in the future. Under the random walk model we find that the certainty-equivalent rate falls continuously from 4\% to 2\% after 100 years, 1\% after 200 years, and 0.5\% after 300 years. At horizons of 400 years, the discounted value increases by a factor of over 40,000 relative to conventional discounting. Applied to climate change mitigation, we find that incorporating discount rate uncertainty almost doubles the expected present value of mitigation benefits.},
language = {en},
number = {1},
urldate = {2020-03-13},
journal = {Journal of Environmental Economics and Management},
author = {Newell, Richard G. and Pizer, William A.},
month = jul,
year = {2003},
keywords = {Climate policy, Discounting, Interest rate forecasting, Intergenerational equity, Uncertainty},
pages = {52--71}
}
@misc{noauthor_economics_nodate,
title = {The {Economics} of {Global} {Warming}},
url = {https://books.google.com/books/about/The_economics_of_global_warming.html?id=9UOSAAAAIAAJ},
urldate = {2020-03-13}
}
@book{cline_economics_1992,
title = {The {Economics} of {Global} {Warming}},
isbn = {978-0-88132-132-6},
abstract = {This study examines the costs and benefits of an aggressive program of global action to limit greenhouse warming. An initial chapter summarizes the scientific issues from the standpoint of an economist. The analysis places heavy emphasis on efforts over a long run of 200 to 300 years, with much greater warming and damages than associated with the conventional benchmark (a doubling of carbon dioxide in the atmosphere). Estimates are presented for economic damages, ranging from agricultural losses and sea-level rise to loss of forests, water scarcity, electricity requirements for air conditioning, and several other major effects. A survey of existing model estimates provides the basis for calculation of costs of limiting emissions of greenhouse gases. After a review of the theory of term discounting in the context of very-long-term environmental issues, the study concludes with a cost-benefit estimate for international action and a discussion of policy measures to mobilize the global response.},
publisher = {Peterson Institute for International Economics},
author = {Cline, William R.},
month = jun,
year = {1992},
note = {Pages: 416 Pages}
}
@techreport{cline_economics_1992-1,
type = {Peterson {Institute} {Press}: {All} {Books}},
title = {The {Economics} of {Global} {Warming}},
url = {https://econpapers.repec.org/bookchap/iieppress/39.htm},
abstract = {This award-winning study examines the costs and benefits of an aggressive program of global action to limit greenhouse warming. An initial chapter summarizes the scientific issues from the standpoint of an economist. The analysis places heavy emphasis on effects over a long run of 200 to 300 years, with much greater warming damages than those associated with the conventional benchmark. * Estimates are presented for economic damages, ranging from agricultural losses and sea level rise to loss of forests, water scarcity, electricity requirements for air conditioning, and several other major effects. The study concludes with a cost- benefit estimate for international action and a discussion of policy measures to mobilize the global response. * Selected by Choice for its 1993 "Outstanding Academic Books" list and winner of the Harold and Margaret Sprout prize for the best book on international environmental affairs, awarded by the International Studies Association.},
urldate = {2020-03-13},
institution = {Peterson Institute for International Economics},
author = {Cline, William},
year = {1992},
note = {ISBN: 9780881321326}
}
@article{ackerman_limitations_2009,
title = {Limitations of integrated assessment models of climate change},
volume = {95},
issn = {1573-1480},
url = {https://doi.org/10.1007/s10584-009-9570-x},
doi = {10.1007/s10584-009-9570-x},
abstract = {The integrated assessment models (IAMs) that economists use to analyze the expected costs and benefits of climate policies frequently suggest that the “optimal” policy is to go slowly and to do relatively little in the near term to reduce greenhouse gas emissions. We trace this finding to the contestable assumptions and limitations of IAMs. For example, they typically discount future impacts from climate change at relatively high rates. This practice may be appropriate for short-term financial decisions but its extension to intergenerational environmental issues rests on several empirically and philosophically controversial hypotheses. IAMs also assign monetary values to the benefits of climate mitigation on the basis of incomplete information and sometimes speculative judgments concerning the monetary worth of human lives and ecosystems, while downplaying scientific uncertainty about the extent of expected damages. In addition, IAMs may exaggerate mitigation costs by failing to reflect the socially determined, path-dependent nature of technical change and ignoring the potential savings from reduced energy utilization and other opportunities for innovation. A better approach to climate policy, drawing on recent research on the economics of uncertainty, would reframe the problem as buying insurance against catastrophic, low-probability events. Policy decisions should be based on a judgment concerning the maximum tolerable increase in temperature and/or carbon dioxide levels given the state of scientific understanding. The appropriate role for economists would then be to determine the least-cost global strategy to achieve that target. While this remains a demanding and complex problem, it is far more tractable and epistemically defensible than the cost-benefit comparisons attempted by most IAMs.},
language = {en},
number = {3},
urldate = {2020-03-13},
journal = {Climatic Change},
author = {Ackerman, Frank and DeCanio, Stephen J. and Howarth, Richard B. and Sheeran, Kristen},
month = aug,
year = {2009},
pages = {297--315}
}
@misc{noauthor_discounting_nodate-1,
title = {Discounting the {Future}},
url = {https://onlinelibrary.wiley.com/doi/pdf/10.1111/j.1088-4963.1994.tb00008.x},
urldate = {2020-03-13}
}
@article{ramsey_mathematical_1928,
title = {A {Mathematical} {Theory} of {Saving}},
volume = {38},
issn = {0013-0133},
url = {https://www.jstor.org/stable/2224098},
doi = {10.2307/2224098},
number = {152},
urldate = {2020-03-13},
journal = {The Economic Journal},
author = {Ramsey, F. P.},
year = {1928},
note = {Publisher: [Royal Economic Society, Wiley]},
pages = {543--559}
}
@misc{noauthor_discounting_nodate-2,
title = {Discounting the {Future}},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/j.1088-4963.1994.tb00008.x},
urldate = {2020-03-13}
}
@article{solow_economics_1974,
title = {The {Economics} of {Resources} or the {Resources} of {Economics}},
volume = {64},
issn = {0002-8282},
url = {https://www.jstor.org/stable/1816009},
number = {2},
urldate = {2020-03-13},
journal = {The American Economic Review},
author = {Solow, Robert M.},
year = {1974},
note = {Publisher: American Economic Association},
pages = {1--14}
}
@article{dasgupta_alternative_1974,
title = {On some alternative criteria for justice between generations},
volume = {3},
issn = {0047-2727},
url = {http://www.sciencedirect.com/science/article/pii/0047272774900073},
doi = {10.1016/0047-2727(74)90007-3},
abstract = {This paper is concerned with Rawls' principle of just savings. Both the intergenerational maxi–min solution and the Nash equilibrium are analyzed in the context of a simple growth model and a specific preference structure. The results are compared to the Utilitarian solution. The maxi–min solution is intertemporally inconsistent and all the Nash equilibria are Pareto inefficient. The latter part of the paper analyzes intergenerational strong equilibria, the α-core and the β-core. It is shown that for the model in question the set of strong equilibria is empty, and that both the α- and β-cores are roughly speaking equal to the set of all Pareto efficient programmes of accumulation.},
language = {en},
number = {4},
urldate = {2020-03-13},
journal = {Journal of Public Economics},
author = {Dasgupta, Partha},
month = nov,
year = {1974},
pages = {405--423}
}
@article{thompson_systems_2014,
title = {A systems approach to evaluating the air quality co-benefits of {US} carbon policies},
volume = {4},
copyright = {2014 Nature Publishing Group},
issn = {1758-6798},
url = {https://www.nature.com/articles/nclimate2342},
doi = {10.1038/nclimate2342},
abstract = {The near-term costs of greenhouse-gas emissions reduction may be offset by the air-quality co-benefits of mitigation policies. Now research estimates the monetary value of the human health benefits from air-quality improvements due to US carbon abatement policies, and finds that the benefits can offset 26–1,050\% of the cost of mitigation policies.},
language = {en},
number = {10},
urldate = {2020-03-13},
journal = {Nature Climate Change},
author = {Thompson, Tammy M. and Rausch, Sebastian and Saari, Rebecca K. and Selin, Noelle E.},
month = oct,
year = {2014},
note = {Number: 10
Publisher: Nature Publishing Group},
pages = {917--923}
}
@article{solomon_irreversible_2009,
title = {Irreversible climate change due to carbon dioxide emissions},
volume = {106},
copyright = {© 2009 by The National Academy of Sciences of the USA. Freely available online through the PNAS open access option.},
issn = {0027-8424, 1091-6490},
url = {https://www.pnas.org/content/106/6/1704},
doi = {10.1073/pnas.0812721106},
abstract = {The severity of damaging human-induced climate change depends not only on the magnitude of the change but also on the potential for irreversibility. This paper shows that the climate change that takes place due to increases in carbon dioxide concentration is largely irreversible for 1,000 years after emissions stop. Following cessation of emissions, removal of atmospheric carbon dioxide decreases radiative forcing, but is largely compensated by slower loss of heat to the ocean, so that atmospheric temperatures do not drop significantly for at least 1,000 years. Among illustrative irreversible impacts that should be expected if atmospheric carbon dioxide concentrations increase from current levels near 385 parts per million by volume (ppmv) to a peak of 450–600 ppmv over the coming century are irreversible dry-season rainfall reductions in several regions comparable to those of the “dust bowl” era and inexorable sea level rise. Thermal expansion of the warming ocean provides a conservative lower limit to irreversible global average sea level rise of at least 0.4–1.0 m if 21st century CO2 concentrations exceed 600 ppmv and 0.6–1.9 m for peak CO2 concentrations exceeding ≈1,000 ppmv. Additional contributions from glaciers and ice sheet contributions to future sea level rise are uncertain but may equal or exceed several meters over the next millennium or longer.},
language = {en},
number = {6},
urldate = {2020-03-13},
journal = {Proceedings of the National Academy of Sciences},
author = {Solomon, Susan and Plattner, Gian-Kasper and Knutti, Reto and Friedlingstein, Pierre},
month = feb,
year = {2009},
pmid = {19179281},
note = {Publisher: National Academy of Sciences
Section: Physical Sciences},
keywords = {dangerous interference, precipitation, sea level rise, warming},
pages = {1704--1709}
}
@article{gregory_vertical_2000,
title = {Vertical heat transports in the ocean and their effect on time-dependent climate change},
volume = {16},
issn = {1432-0894},
url = {https://doi.org/10.1007/s003820000059},
doi = {10.1007/s003820000059},
abstract = {In response to increasing atmospheric concentrations of greenhouse gases, the rate of time-dependent climate change is determined jointly by the strength of climate feedbacks and the efficiency of processes which remove heat from the surface into the deep ocean. This work examines the vertical heat transport processes in the ocean of the HADCM2 atmosphere–ocean general circulation model (AOGCM) in experiments with CO2 held constant (control) and increasing at 1 per year (anomaly). The control experiment shows that global average heat exchanges between the upper and lower ocean are dominated by the Southern Ocean, where heat is pumped downwards by the wind-driven circulation and diffuses upwards along sloping isopycnals. This is the reverse of the low-latitude balance used in upwelling–diffusion ocean models, the global average upward diffusive transport being against the temperature gradient. In the anomaly experiment, weakened convection at high latitudes leads to reduced diffusive and convective heat loss from the deep ocean, and hence to net heat uptake, since the advective heat input is less affected. Reduction of deep water production at high latitudes results in reduced upwelling of cold water at low latitudes, giving a further contribution to net heat uptake. On the global average, high-latitude processes thus have a controlling influence. The important role of diffusion highlights the need to ensure that the schemes employed in AOGCMs give an accurate representation of the relevant sub-grid-scale processes.},
language = {en},
number = {7},
urldate = {2020-03-08},
journal = {Climate Dynamics},
author = {Gregory, J. M.},
month = jul,
year = {2000},
pages = {501--515}
}
@article{held_probing_2010,
title = {Probing the {Fast} and {Slow} {Components} of {Global} {Warming} by {Returning} {Abruptly} to {Preindustrial} {Forcing}},
volume = {23},
issn = {0894-8755},
url = {https://journals.ametsoc.org/doi/full/10.1175/2009JCLI3466.1},
doi = {10.1175/2009JCLI3466.1},
abstract = {The fast and slow components of global warming in a comprehensive climate model are isolated by examining the response to an instantaneous return to preindustrial forcing. The response is characterized by an initial fast exponential decay with an e-folding time smaller than 5 yr, leaving behind a remnant that evolves more slowly. The slow component is estimated to be small at present, as measured by the global mean near-surface air temperature, and, in the model examined, grows to 0.4°C by 2100 in the A1B scenario from the Special Report on Emissions Scenarios (SRES), and then to 1.4°C by 2300 if one holds radiative forcing fixed after 2100. The dominance of the fast component at present is supported by examining the response to an instantaneous doubling of CO2 and by the excellent fit to the model’s ensemble mean twentieth-century evolution with a simple one-box model with no long times scales.},
number = {9},
urldate = {2020-03-08},
journal = {Journal of Climate},
author = {Held, Isaac M. and Winton, Michael and Takahashi, Ken and Delworth, Thomas and Zeng, Fanrong and Vallis, Geoffrey K.},
month = jan,
year = {2010},
pages = {2418--2427}
}
@article{gregory_transient_2008,
title = {Transient climate response estimated from radiative forcing and observed temperature change},
volume = {113},
issn = {2156-2202},
url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2008JD010405},
doi = {10.1029/2008JD010405},
abstract = {Observations and simulations (using the HadCM3 AOGCM) of time-dependent twentieth-century climate change indicate a linear relationship F = ρΔT between radiative forcing F and global mean surface air temperature change ΔT. The same is a good description of ΔT from CMIP3 AOGCMs integrated with CO2 increasing at 1\% per year compounded. The constant “climate resistance” ρ is related to the transient climate response (TCR, ΔT at the time of doubled CO2 under the 1\% CO2 scenario). Disregarding any trend caused by natural forcing (volcanic and solar), which is small compared with the trend in anthropogenic forcing, we estimate that the real-world TCR is 1.3–2.3 K (5–95\% uncertainty range) from the data of 1970–2006, allowing for the effect of unforced variability on longer timescales. The climate response to episodic volcanic forcing cannot be described by the same relationship and merits further investigation; this constitutes a systematic uncertainty of the method. The method is quite insensitive to the anthropogenic aerosol forcing, which probably did not vary much during 1970–2006 and therefore did not affect the trend in ΔT. Our range is very similar to the range of recent AOGCM results for the TCR. Consequently projections for warming during the twenty-first century under the SRES A1B emissions scenario made using the simple empirical relationship F = ρΔT agree with the range of AOGCM results for that scenario. Our TCR range is also similar to those from observationally constrained model-based methods.},
language = {en},
number = {D23},
urldate = {2020-03-08},
journal = {Journal of Geophysical Research: Atmospheres},
author = {Gregory, J. M. and Forster, P. M.},
year = {2008},
keywords = {Transient, climate, response}
}
@article{glotter_simple_2014,
title = {A simple carbon cycle representation for economic and policy analyses},
volume = {126},
issn = {1573-1480},
url = {https://doi.org/10.1007/s10584-014-1224-y},
doi = {10.1007/s10584-014-1224-y},
abstract = {Integrated Assessment Models (IAMs) that couple the climate system and the economy require a representation of ocean CO2 uptake to translate human-produced emissions to atmospheric concentrations and in turn to climate change. The simple linear carbon cycle representations in most IAMs are not however physical at long timescales, since ocean carbonate chemistry makes CO2 uptake highly nonlinear. No linearized representation can capture the ocean’s dual-mode behavior, with initial rapid uptake and then slow equilibration over ∽10,000 years. In a business-as-usual scenario followed by cessation of emissions, the carbon cycle in the 2007 version of the most widely used IAM, DICE (Dynamic Integrated model of Climate and the Economy), produces errors of ∽2∘C by the year 2300 and ∽6∘C by the year 3500. We suggest here a simple alternative representation that captures the relevant physics and show that it reproduces carbon uptake in several more complex models to within the inter-model spread. The scheme involves little additional complexity over the DICE model, making it a useful tool for economic and policy analyses.},
language = {en},
number = {3},
urldate = {2020-03-08},
journal = {Climatic Change},
author = {Glotter, Michael J. and Pierrehumbert, Raymond T. and Elliott, Joshua W. and Matteson, Nathan J. and Moyer, Elisabeth J.},
month = oct,
year = {2014},
pages = {319--335}
}
@misc{noauthor_zotero_nodate,
title = {Zotero {\textbar} {Your} personal research assistant},
url = {https://www.zotero.org/},
urldate = {2020-03-08}
}
@article{walin_relation_1982,
title = {On the relation between sea-surface heat flow and thermal circulation in the ocean},
volume = {34},
issn = {0040-2826},
url = {https://doi.org/10.3402/tellusa.v34i2.10801},
doi = {10.3402/tellusa.v34i2.10801},
abstract = {A theoretical framework for the description of the thermal state and circulation in the ocean is presented. Relations between differential heating, diffusive heat flux and surface drift are derived and discussed in the light of the basic mechanical property of the system, i.e. the tendency for light water to spread on top of heavier water. We find that for low and medium temperatures, the poleward surface drift can be determined directly from a knowledge of the heat flux through the sea surface. Preliminary quantitative estimates indicate that the “Hadley circulation” for the entire ocean involves a volume flux of order 70 ± 30 Sverdrup. For the North Atlantic we find a poleward drift of order 10 Sverdrup which compares well with previous estimates of the southward deep flow. Mean values for the diffusive flux in the ocean are found to be of order 20 W m-2 at 15°C and 60 W m-2 at 25°C.},
number = {2},
urldate = {2020-02-10},
journal = {Tellus},
author = {Walin, Gösta},
month = jan,
year = {1982},
pages = {187--195}
}
@article{gill_hydraulics_1977,
title = {The hydraulics of rotating-channel flow},
volume = {80},
issn = {1469-7645, 0022-1120},
url = {https://www.cambridge.org/core/journals/journal-of-fluid-mechanics/article/hydraulics-of-rotatingchannel-flow/9F60E4E7F489AB8B0E6A29F722F1913F},
doi = {10.1017/S0022112077002407},
abstract = {Flow of a homogeneous inviscid fluid down a rotating channel of slowly varying crosssection is considered, with particular reference to conditions under which the flow is ‘hydraulically controlled’. This problem is a member of a general class of problems of which gas flow through a nozzle and flow over a broad-crested weir are examples (Binnie 1949). A general discussion of such problems gives the means for determining the position of the control section (which is generally flow dependent) and shows that at this position there always exist long-wave disturbances with zero phase speed (i.e. disturbances are always ‘critical’ at the control section). The general theory is applied to the rotating-channel problem for the case of uniform potential vorticity. For this problem, three parameters are needed to specify the upstream flow, and the control theory gives a relationship between these parameters which depends on the geometry of the channel.},
language = {en},
number = {4},
urldate = {2020-01-29},
journal = {Journal of Fluid Mechanics},
author = {Gill, A. E.},
month = may,
year = {1977},
pages = {641--671}
}
@book{pratt_rotating_2008,
title = {Rotating {Hydraulics}: {Nonlinear} {Topographic} {Effects} in the {Ocean} and {Atmosphere}},
shorttitle = {Rotating {Hydraulics}},
author = {Pratt, Larry and Whitehead, J. A.},
publisher = {Springer},
month = jan,
year = {2008}
}
@article{pratt_hydraulic_1987,
title = {Hydraulic {Control} of {Flows} with {Nonuniform} {Potential} {Vorticity}},
volume = {17},
issn = {0022-3670},
url = {https://journals.ametsoc.org/doi/abs/10.1175/1520-0485(1987)017%3C2016:HCOFWN%3E2.0.CO%3B2},
doi = {10.1175/1520-0485(1987)017<2016:HCOFWN>2.0.CO;2},
abstract = {The hydraulics of flow contained in a channel and having nonuniform potential vorticity is considered from a general standpoint. The channel cross section is rectangular and the potential vorticity is assumed to be prescribed in terms of the streamfunction. We show that the general computational problem can be expressed in two traditional forms, the first of which consists of an algebraic relation between the channel geometry and a single dependent flow variable and the second of which consists of a pair of quasi-linear differential equations relating the geometry to two dependent flow variables. From these forms we derive a general “branch condition” indicating a merger of different solutions having the same flow rate and energy and show that this condition implies that the flow is critical with respect to a certain long wave. It is shown that critical flow can occur only at the sill in a channel of constant width (with one exception) at a point of width extremum in a flat bottom channel. We also discuss the situation in which the fluid becomes detached from one of sidewalls. An example is given in which the potential vorticity is a linear function of the streamfunction and the rotation rate is zero, a case which can be solved analytically. When the potential vorticity gradient points downstream, allowing propagation of potential vorticity waves against the flow, multiple pairs of steady states are possible, each having a unique modal structure. Critical control of the higher-mode solutions is primarily over vorticity, rather than depth. Flow reversals arise in some situations, possible invalidating the prescription of potential vorticity.},
number = {11},
urldate = {2020-01-29},
journal = {Journal of Physical Oceanography},
author = {Pratt, Lawrence J. and Armi, Laurence},
month = nov,
year = {1987},
pages = {2016--2029}
}
@article{willmott_advantages_2005,
title = {Advantages of the mean absolute error ({MAE}) over the root mean square error ({RMSE}) in assessing average model performance},
volume = {30},
issn = {0936-577X, 1616-1572},
url = {https://www.int-res.com/abstracts/cr/v30/n1/p79-82/},
doi = {10.3354/cr030079},
abstract = {The relative abilities of 2, dimensioned statistics—the root-mean-square error (RMSE) and the mean absolute error (MAE)—to describe average model-performance error are examined. The RMSE is of special interest because it is widely reported in the climatic and environmental literature; nevertheless, it is an inappropriate and misinterpreted measure of average error. RMSE is inappropriate because it is a function of 3 characteristics of a set of errors, rather than of one (the average error). RMSE varies with the variability within the distribution of error magnitudes and with the square root of the number of errors (n1/2), as well as with the average-error magnitude (MAE). Our findings indicate that MAE is a more natural measure of average error, and (unlike RMSE) is unambiguous. Dimensioned evaluations and inter-comparisons of average model-performance error, therefore, should be based on MAE.},
language = {en},
number = {1},
urldate = {2019-12-16},
journal = {Climate Research},
author = {Willmott, Cort J. and Matsuura, Kenji},
month = dec,
year = {2005},
keywords = {Mean absolute error, Model-performance measures, Root-mean-square error},
pages = {79--82}
}
@article{cimoli_sensitivity_2019,
title = {Sensitivity of deep ocean mixing to local internal tide breaking and mixing efficiency},
copyright = {This article is protected by copyright. All rights reserved.},
issn = {1944-8007},
url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019GL085056},
doi = {10.1029/2019GL085056},
abstract = {There have been recent advancements in the quantification of parameters describing the proportion of internal tide energy being dissipated locally and the “efficiency” of diapycnal mixing, i.e. the ratio of the diapycnal mixing rate to the kinetic energy dissipation rate. We show that oceanic tidal mixing is non-trivially sensitive to the co-variation of these parameters. Varying these parameters one at the time can lead to significant errors in the patterns of diapycnal mixing driven upwelling and downwelling, and to the over and under estimation of mixing in such a way that the net rate of globally-integrated deep circulation appears reasonable. However, the local rates of upwelling and downwelling in the deep ocean are significantly different when both parameters are allowed to co-vary and be spatially variable. These findings have important implications for the representation of oceanic heat, carbon, nutrients and other tracer budgets in general circulation models.},
language = {en},
urldate = {2019-12-13},
journal = {Geophysical Research Letters},
author = {Cimoli, Laura and Caulfield, Colm-cille P. and Johnson, Helen L. and Marshall, David P. and Mashayek, Ali and Naveira Garabato, Alberto C. and Vic, Clément},
month = dec,
year = {2019},
internal-note = {NOTE(review): early-view export; the original placeholder fields volume/number = {n/a} were removed -- fill in the assigned volume and number once known. Author fixed: "Naveira Garabato" is a compound surname and must be entered in "Last, First" form.},
}
@article{nikurashin_global_2011,
  author    = {Nikurashin, Maxim and Ferrari, Raffaele},
  title     = {Global energy conversion rate from geostrophic flows into internal lee waves in the deep ocean},
  journal   = {Geophysical Research Letters},
  volume    = {38},
  number    = {8},
  year      = {2011},
  doi       = {10.1029/2011GL046576},
  issn      = {1944-8007},
  url       = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2011GL046576},
  urldate   = {2019-12-02},
  copyright = {Copyright 2011 by the American Geophysical Union.},
  language  = {en},
  abstract  = {A global estimate of the energy conversion rate from geostrophic flows into internal lee waves in the ocean is presented. The estimate is based on a linear theory applied to bottom topography at O(1–10) km scales obtained from single beam echo soundings, to bottom stratification estimated from climatology, and to bottom velocity obtained from a global ocean model. The total energy flux into internal lee waves is estimated to be 0.2 TW which is 20\% of the global wind power input into the ocean. The geographical distribution of the energy flux is largest in the Southern Ocean which accounts for half of the total energy flux. The results suggest that the generation of internal lee waves at rough topography is a significant energy sink for the geostrophic flows as well as an important energy source for internal waves and the associated turbulent mixing in the deep ocean.},
  keywords  = {abyssal mixing, geostrophic eddies, internal waves, lee waves, overturning circulation, topographic waves},
}
@article{nycander_generation_2005,
  author    = {Nycander, J.},
  title     = {Generation of internal waves in the deep ocean by tides},
  journal   = {Journal of Geophysical Research: Oceans},
  volume    = {110},
  number    = {C10},
  year      = {2005},
  doi       = {10.1029/2004JC002487},
  issn      = {2156-2202},
  url       = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2004JC002487},
  urldate   = {2019-12-02},
  copyright = {Copyright 2005 by the American Geophysical Union.},
  language  = {en},
  abstract  = {A direct computation of the tidal generation of internal waves over the global ocean is presented. It is based on linear wave theory and high-resolution data for the bottom topography. The geographical distribution of the energy flux from tides to internal waves is determined with a spatial resolution of a few kilometers. The total flux over the area with a depth greater than 500 m is found to be 1.2 TW. The greatest uncertainties of the computation are due to unresolved topography and to nonlinear effects caused by supercritical bottom slope.},
  keywords  = {internal waves, tides, vertical mixing},
}
@article{holden_plasimgenie_2016,
title = {{PLASIM}–{GENIE} v1.0: a new intermediate complexity {AOGCM}},
volume = {9},
issn = {1991-959X},
shorttitle = {{PLASIM}–{GENIE} v1.0},
url = {https://www.geosci-model-dev.net/9/3347/2016/},
doi = {10.5194/gmd-9-3347-2016},
abstract = {We describe the development, tuning and climate of Planet Simulator (PLASIM)–Grid-ENabled Integrated Earth system model (GENIE), a new intermediate complexity Atmosphere–Ocean General Circulation Model (AOGCM), built by coupling the Planet Simulator to the ocean, sea-ice and land-surface components of the GENIE Earth system model. PLASIM–GENIE supersedes GENIE-2, a coupling of GENIE to the Reading Intermediate General Circulation Model (IGCM). The primitive-equation atmosphere includes chaotic, three-dimensional (3-D) motion and interactive radiation and clouds, and dominates the computational load compared to the relatively simpler frictional-geostrophic ocean, which neglects momentum advection. The model is most appropriate for long-timescale or large ensemble studies where numerical efficiency is prioritised, but lack of data necessitates an internally consistent, coupled calculation of both oceanic and atmospheric fields. A 1000-year simulation with PLASIM–GENIE requires approximately 2 weeks on a single node of a 2.1 GHz AMD 6172 CPU. We demonstrate the tractability of PLASIM–GENIE ensembles by deriving a subjective tuning of the model with a 50-member ensemble of 1000-year simulations. The simulated climate is presented considering (i) global fields of seasonal surface air temperature, precipitation, wind, solar and thermal radiation, with comparisons to reanalysis data; (ii) vegetation carbon, soil moisture and aridity index; and (iii) sea surface temperature, salinity and ocean circulation. Considering its resolution, PLASIM–GENIE reproduces the main features of the climate system well and demonstrates usefulness for a wide range of applications.},
language = {English},
number = {9},
urldate = {2019-11-18},
journal = {Geoscientific Model Development},
author = {Holden, Philip B. and Edwards, Neil R. and Fraedrich, Klaus and Kirk, Edilbert and Lunkeit, Frank and Zhu, Xiuhua},
month = sep,
year = {2016},
pages = {3347--3361},
}
@article{gent_isopycnal_1990,
title = {Isopycnal {Mixing} in {Ocean} {Circulation} {Models}},
volume = {20},
issn = {0022-3670},
url = {http://journals.ametsoc.org/doi/abs/10.1175/1520-0485(1990)020%3C0150:IMIOCM%3E2.0.CO;2},
doi = {10.1175/1520-0485(1990)020<0150:IMIOCM>2.0.CO;2},
abstract = {Abstract A subgrid-scale form for mesoscale eddy mixing on isopycnal surfaces is proposed for use in non-eddy-resolving ocean circulation models. The mixing is applied in isopycnal coordinates to isopycnal layer thickness, or inverse density gradient, as well as to passive scalars, temperature and salinity. The transformation of these mixing forms to physical coordinates is also presented.},
number = {1},
journal = {Journal of Physical Oceanography},
author = {Gent, Peter R. and McWilliams, James C.},
year = {1990},
pages = {150--155},
}
@article{stommel_abyssal_1959,
  author   = {Stommel, Henry and Arons, A. B.},
  title    = {On the abyssal circulation of the world ocean — {II}. {An} idealized model of the circulation pattern and amplitude in oceanic basins},
  journal  = {Deep Sea Research (1953)},
  volume   = {6},
  pages    = {217--233},
  month    = jan,
  year     = {1959},
  doi      = {10.1016/0146-6313(59)90075-9},
  issn     = {0146-6313},
  url      = {http://www.sciencedirect.com/science/article/pii/0146631359900759},
  urldate  = {2019-07-18},
  abstract = {Stationary planetary flow patterns driven by source sink distributions in an ocean on the rotating earth were developed in Part I. These theoretical results are now used to construct a highly idealized model of the general abyssal circulation of the world ocean. The model is based on the postulation of the existence of two concentrated sources of abyssal waters (one in the North Atlantic and another in the Weddell Sea) and on a uniformly distributed upward flux of water from the abyssal to the upper layers as part of the mechanism of the main oceanic thermocline. Order of magnitude calculations based on this model lead to a variety of estimates of the time in which the deep water is replaced (from every 200 to 1800 years). Comparison of leading terms in the dynamical equations and equation describing the flux divergence of a tracer shows that there is a large range of lateral eddy coefficient which will influence the distribution of the tracer but not affect the dynamically determined planetary flow patterns.},
}
@article{stommel_abyssal_1959-1,
  author   = {Stommel, Henry and Arons, A. B.},
  title    = {On the abyssal circulation of the world ocean—{I}. {Stationary} planetary flow patterns on a sphere},
  journal  = {Deep Sea Research (1953)},
  volume   = {6},
  pages    = {140--154},
  month    = jan,
  year     = {1959},
  doi      = {10.1016/0146-6313(59)90065-6},
  issn     = {0146-6313},
  url      = {http://www.sciencedirect.com/science/article/pii/0146631359900656},
  urldate  = {2019-09-18},
  abstract = {A treatment of stationary planetary flow patterns driven by source-sink distributions in a cylindrical tank (Stommel et al., 1958) is extended to predict flow patterns which might be expected under similar circumstances on a rotating sphere. Flow patterns are sketched for various source-sink distributions and meridional and zonal boundary conditions.},
}
@article{hogg_transport_1982,
title = {On the {Transport} and {Modification} of {Antarctic} {Bottom} {Water} in the {Vema} {Channel}},
volume = {40},
abstract = {The Vema Channel is a deep passage across the Rio Grande Rise in the South Atlantic through which Antarctic Bottom Water (AABW) must flow on its way northward from the Argentine Basin to the Brazil Basin and eventually into the North Atlantic. Both dynamic computation and direct current measurement based on recently acquired data indicate that the volume transport of AABW is about 4 x 10(6) m(3)/sec northward with a standard deviation of about 1.2 x 10(6) m(3)/sec. There are no known exits for AABW below 1 degrees C out of the Brazil Basin and it is estimated by heat flux balance that if AABW leaves this basin across isopycnals, a diffusion rate of 3-4 cm(2)/sec so directed is required. There is a sharp water mass transition between the two basins across the Rio Grande Rise with AABW in the Argentine Basin being distinctively fresher and colder in the potential temperature range from 0.2 degrees C to 2.0 degrees C at the same density. Cold tongues of fresh water advected into the Vema Channel may be thoroughly mixed by lateral eddy diffusion at a rate estimated to be 4 x 10(6) cm(2)/sec. This process demands a supply of Brazil Basin AABW from the north consistent with an observed weak southward flow to the east of the much more intense northward jet. Isopycnals show a reversal in slope with depth within the channel (but not outside) such that the coldest water is in the west at shallow AABW depths but in the east near the. bottom. East of the channel axis there are thick bottom boundary layers which are nearly homogeneous in the vertical but horizontally stratified. We suggest a dynamical, nonmixing, mechanism for producing these features. Dissolved silicate measurements reveal a filament of low concentration, presumably North Atlantic Deep Water, which is located over the channel axis at 2500 m depth. This is some 1000 m above the Rio Grande Rise and 2000 m above the Channel floor.},
journal = {Journal of Marine Research},
author = {Hogg, Nelson and Biscaye, Pierre E. and Gardner, Wilford and Schmitz, Jr., William J.},
month = jan,
year = {1982},
pages = {231--263},
internal-note = {NOTE(review): the original author list ended in the mangled name "Jr, W"; reconstructed as Schmitz, Jr., William J. per the published author list -- verify. Journal expanded from "J. Mar. Res." for consistency with the file's full-name convention.},
}
@article{nikurashin_radiation_2009,
title = {Radiation and {Dissipation} of {Internal} {Waves} {Generated} by {Geostrophic} {Motions} {Impinging} on {Small}-{Scale} {Topography}: {Theory}},
volume = {40},
issn = {0022-3670},
shorttitle = {Radiation and {Dissipation} of {Internal} {Waves} {Generated} by {Geostrophic} {Motions} {Impinging} on {Small}-{Scale} {Topography}},
url = {https://journals.ametsoc.org/doi/full/10.1175/2009JPO4199.1},
doi = {10.1175/2009JPO4199.1},
abstract = {Observations and inverse models suggest that small-scale turbulent mixing is enhanced in the Southern Ocean in regions above rough topography. The enhancement extends O(1) km above the topography, suggesting that mixing is supported by the breaking of gravity waves radiated from the ocean bottom. In this study, it is shown that the observed mixing rates can be sustained by internal waves generated by geostrophic motions flowing over bottom topography. Weakly nonlinear theory is used to describe the internal wave generation and the feedback of the waves on the zonally averaged flow. Vigorous inertial oscillations are driven at the ocean bottom by waves generated at steep topography. The wave radiation and dissipation at equilibrium is therefore the result of both geostrophic flow and inertial oscillations differing substantially from the classical lee-wave problem. The theoretical predictions are tested versus two-dimensional high-resolution numerical simulations with parameters representative of Drake Passage. This work suggests that mixing in Drake Passage can be supported by geostrophic motions impinging on rough topography rather than by barotropic tidal motions, as is commonly assumed.},
number = {5},
urldate = {2019-09-25},
journal = {Journal of Physical Oceanography},
author = {Nikurashin, Maxim and Ferrari, Raffaele},
month = nov,
year = {2009},
pages = {1055--1074},
internal-note = {NOTE(review): volume 40(5), pp. 1055-1074 corresponds to the 2010 JPO issue, but year/month here say Nov 2009 (possibly the early-online date) -- verify against DOI 10.1175/2009JPO4199.1},
}
@article{young_exact_2011,
title = {An {Exact} {Thickness}-{Weighted} {Average} {Formulation} of the {Boussinesq} {Equations}},
volume = {42},
issn = {0022-3670},
url = {https://journals.ametsoc.org/doi/full/10.1175/JPO-D-11-0102.1},
doi = {10.1175/JPO-D-11-0102.1},
abstract = {The author shows that a systematic application of thickness-weighted averaging to the Boussinesq equations of motion results in averaged equations of motion written entirely in terms of the thickness-weighted velocity; that is, the unweighted average velocity and the eddy-induced velocity do not appear in the averaged equations of motion. This thickness-weighted average (TWA) formulation is identical to the unaveraged equations, apart from eddy forcing by the divergence of three-dimensional Eliassen–Palm (EP) vectors in the two horizontal momentum equations. These EP vectors are second order in eddy amplitude and, moreover, the EP divergences can be expressed in terms of the eddy flux of the Rossby–Ertel potential vorticity derived from the TWA equations of motion. That is, there is a fully nonlinear and three-dimensional generalization of the one- and two-dimensional identities found by Taylor and Bretherton. The only assumption required to obtain this exact TWA formulation is that the buoyancy field is stacked vertically; that is, that the buoyancy frequency is never zero. Thus, the TWA formulation applies to nonrotating stably stratified turbulent flows, as well as to large-scale rapidly rotating flows. Though the TWA formulation is obtained by working on the equations of motion in buoyancy coordinates, the averaged equations of motion can then be translated into Cartesian coordinates, which is the most useful representation for many purposes.},
number = {5},
urldate = {2019-09-23},
journal = {Journal of Physical Oceanography},
author = {Young, William R.},
month = nov,
year = {2011},
pages = {692--707},
internal-note = {NOTE(review): volume 42(5), pp. 692-707 corresponds to the 2012 JPO issue, but year/month here say Nov 2011 (possibly the early-online date) -- verify against DOI 10.1175/JPO-D-11-0102.1},
}
@article{salmun_two-dimensional_1991,
  author    = {Salmun, Haydee and Killworth, Peter D. and Blundell, Jeffrey R.},
  title     = {A two-dimensional model of boundary mixing},
  journal   = {Journal of Geophysical Research: Oceans},
  volume    = {96},
  number    = {C10},
  pages     = {18447--18474},
  year      = {1991},
  doi       = {10.1029/91JC01917},
  issn      = {2156-2202},
  url       = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/91JC01917},
  urldate   = {2019-09-23},
  copyright = {Copyright 1991 by the American Geophysical Union.},
  language  = {en},
  abstract  = {The steady flow in and near a turbulent boundary layer on a sloping boundary is considered, for a nonrotating stratified fluid. It is shown that the effect of nonuniform stratification in the interior (i.e., away from the turbulent layer) is to induce a convergent-divergent flow in the boundary layer which results in an inflow or outflow to the interior. This inflow-outflow acts in a divergent-convergent manner on the interior isopycnals. Use of a filling-box argument for the slow time variation of the interior then produces an approximate expression for an effective interior vertical diffusivity. The parameter dependence of this diffusivity is very strong; however, with realistic geophysical values (subject to the omission of rotation), e.g., a mixed layer depth of 50 m, Munk's canonical value of 10−4 m2 s−1 can easily be achieved. This work thus supports the view that boundary mixing is likely to be an important process in setting the vertical density profile in the ocean.},
}
@article{salmon_two-layer_1992,
  author   = {Salmon, Rick},
  title    = {A two-layer {Gulf} {Stream} over a continental slope},
  journal  = {Journal of Marine Research},
  volume   = {50},
  number   = {3},
  pages    = {341--365},
  month    = aug,
  year     = {1992},
  doi      = {10.1357/002224092784797610},
  issn     = {00222402, 15439542},
  url      = {http://openurl.ingenta.com/content/xref?genre=article&issn=0022-2402&volume=50&issue=3&spage=341},
  urldate  = {2019-09-23},
  language = {en},
  abstract = {We consider the two-layer form of the planetary geostrophic equations, in which a simple Rayleigh friction replaces the inertia, on a western continental slope. In the frictionless limit, these equations can be written as characteristic equations in which the potential vorticities of the top and bottom layers play the role of Riemann invariants. The general solution is of two types. In the first type, the characteristics can cross, and friction is required to resolve the resulting shocks. In the second type, one of the two Riemann invariants is uniform, the remaining characteristic is a line of constantftH, and the solutions take a simple explicit form. A solution resembling the Gulf Stream can be formed by combining three solutions of the second type. Compared to the corresponding solution for homogeneous fluid, the Gulf Stream and its seaward countercurrent are stronger, and the latter is concentrated in a thin frictional layer on the eastern edge of the Stream.},
}
@book{grinfeld_introduction_2013,
  author    = {Grinfeld, Pavel},
  title     = {Introduction to {Tensor} {Analysis} and the {Calculus} of {Moving} {Surfaces}},
  publisher = {Springer-Verlag},
  address   = {New York},
  year      = {2013},
  isbn      = {978-1-4614-7866-9},
  url       = {https://www.springer.com/gp/book/9781461478669},
  urldate   = {2019-09-20},
  language  = {en},
  abstract  = {This textbook is distinguished from other texts on the subject by the depth of the presentation and the discussion of the calculus of moving surfaces, which is an extension of tensor calculus to deforming manifolds. Designed for advanced undergraduate and graduate students, this text invites its audience to take a fresh look at previously learned material through the prism of tensor calculus. Once the framework is mastered, the student is introduced to new material which includes differential geometry on manifolds, shape optimization, boundary perturbation and dynamic fluid film equations. The language of tensors, originally championed by Einstein, is as fundamental as the languages of calculus and linear algebra and is one that every technical scientist ought to speak. The tensor technique, invented at the turn of the 20th century, is now considered classical. Yet, as the author shows, it remains remarkably vital and relevant. The author’s skilled lecturing capabilities are evident by the inclusion of insightful examples and a plethora of exercises. A great deal of material is devoted to the geometric fundamentals, the mechanics of change of variables, the proper use of the tensor notation and the discussion of the interplay between algebra and geometry. The early chapters have many words and few equations. The definition of a tensor comes only in Chapter 6 – when the reader is ready for it. While this text maintains a consistent level of rigor, it takes great care to avoid formalizing the subject. The last part of the textbook is devoted to the Calculus of Moving Surfaces. It is the first textbook exposition of this important technique and is one of the gems of this text. A number of exciting applications of the calculus are presented including shape optimization, boundary perturbation of boundary value problems and dynamic fluid film equations developed by the author in recent years. 
Furthermore, the moving surfaces framework is used to offer new derivations of classical results such as the geodesic equation and the celebrated Gauss-Bonnet theorem.},
}
@article{bezanson_julia:_2017,
title = {Julia: {A} {Fresh} {Approach} to {Numerical} {Computing}},
volume = {59},
issn = {0036-1445},
shorttitle = {Julia},
url = {https://epubs.siam.org/doi/10.1137/141000671},
doi = {10.1137/141000671},
abstract = {Bridging cultures that have often been distant, Julia combines expertise from the diverse fields of computer science and computational science to create a new approach to numerical computing. Julia is designed to be easy and fast and questions notions generally held to be “laws of nature" by practitioners of numerical computing: {\textbackslash}beginlist {\textbackslash}item High-level dynamic programs have to be slow. {\textbackslash}item One must prototype in one language and then rewrite in another language for speed or deployment. {\textbackslash}item There are parts of a system appropriate for the programmer, and other parts that are best left untouched as they have been built by the experts. {\textbackslash}endlist We introduce the Julia programming language and its design---a dance between specialization and abstraction. Specialization allows for custom treatment. Multiple dispatch, a technique from computer science, picks the right algorithm for the right circumstance. Abstraction, which is what good computation is really about, recognizes what remains the same after differences are stripped away. Abstractions in mathematics are captured as code through another technique from computer science, generic programming. Julia shows that one can achieve machine performance without sacrificing human convenience.},
number = {1},
urldate = {2019-09-20},
journal = {SIAM Review},
author = {Bezanson, Jeff and Edelman, Alan and Karpinski, Stefan and Shah, Viral B.},
month = jan,
year = {2017},
pages = {65--98},
}
@article{hines_role_2019,
  author    = {Hines, Sophia K. V. and Thompson, Andrew F. and Adkins, Jess F.},
  title     = {The {Role} of the {Southern} {Ocean} in {Abrupt} {Transitions} and {Hysteresis} in {Glacial} {Ocean} {Circulation}},
  journal   = {Paleoceanography and Paleoclimatology},
  volume    = {34},
  number    = {4},
  pages     = {490--510},
  year      = {2019},
  doi       = {10.1029/2018PA003415},
  issn      = {2572-4525},
  url       = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2018PA003415},
  urldate   = {2019-09-18},
  copyright = {©2019. American Geophysical Union. All Rights Reserved.},
  language  = {en},
  abstract  = {High-latitude Northern Hemisphere climate during the last glacial period was characterized by a series of abrupt climate changes, known as Dansgaard-Oeschger events, which were recorded in Greenland ice cores as shifts in the oxygen isotopic composition of the ice. These shifts in inferred Northern Hemisphere high-latitude temperature have been linked to changes in Atlantic meridional overturning strength. The response of ocean overturning circulation to forcing is nonlinear and a hierarchy of models have suggested that it may exist in multiple steady state configurations. Here, we use a time-dependent coarse-resolution isopycnal model with four density classes and two basins, linked by a Southern Ocean to explore overturning states and their stability to changes in external parameters. The model exhibits hysteresis in both the steady state stratification and overturning strength as a function of the magnitude of North Atlantic Deep Water formation. Hysteresis occurs as a result of two nonlinearities in the model—the surface buoyancy distribution in the Southern Ocean and the vertical diffusivity profile in the Atlantic and Indo-Pacific basins. We construct a metric to assess circulation configuration in the model, motivated by observations from the Last Glacial Maximum, which show a different circulation structure from the modern. We find that circulation configuration is primarily determined by North Atlantic Deep Water density. The model results are used to suggest how ocean conditions may have influenced the pattern of Dansgaard-Oeschger events across the last glacial cycle.},
  keywords  = {abrupt climate change, glacial ocean circulation, hysteresis, ocean circulation},
}
@misc{noauthor_note_nodate,
title = {A {Note} on the {Western} {Intensification} of the {Oceanic} {Circulation}},
howpublished = {National Technical Reports Library -- {NTIS}, accession no. {AD641732}},
url = {https://ntrl.ntis.gov/NTRL/dashboard/searchResults/titleDetail/AD641732.xhtml},
urldate = {2019-09-09},
internal-note = {NOTE(review): title originally contained the scraped page chrome "{\textbar} National Technical Reports Library - NTIS"; moved to howpublished. Author and year missing -- fill in from the NTIS record if needed.},
}
@article{abernathey_diagnostics_2013,
  author   = {Abernathey, Ryan and Ferreira, David and Klocker, Andreas},
  title    = {Diagnostics of isopycnal mixing in a circumpolar channel},
  journal  = {Ocean Modelling},
  volume   = {72},
  pages    = {1--16},
  month    = dec,
  year     = {2013},
  doi      = {10.1016/j.ocemod.2013.07.004},
  issn     = {1463-5003},
  url      = {http://www.sciencedirect.com/science/article/pii/S1463500313001200},
  urldate  = {2019-09-08},
  abstract = {Mesoscale eddies mix tracers along isopycnals and horizontally at the sea surface. This paper compares different methods of diagnosing eddy mixing rates in an idealized, eddy-resolving model of a channel flow meant to resemble the Antarctic Circumpolar Current. The first set of methods, the “perfect” diagnostics, are techniques suitable only to numerical models, in which detailed synoptic data is available. The perfect diagnostic include flux-gradient diffusivities of buoyancy, QGPV, and Ertel PV; Nakamura effective diffusivity; and the four-element diffusivity tensor calculated from an ensemble of passive tracers. These diagnostics reveal a consistent picture of isopycnal mixing by eddies, with a pronounced maximum near 1000m depth. The isopycnal diffusivity differs from the buoyancy diffusivity, a.k.a. the Gent–McWilliams transfer coefficient, which is weaker and peaks near the surface and bottom. The second set of methods are observationally “practical” diagnostics. They involve monitoring the spreading of tracers or Lagrangian particles in ways that are plausible in the field. We show how, with sufficient ensemble size, the practical diagnostics agree with the perfect diagnostics in an average sense. Some implications for eddy parameterization are discussed.},
  keywords = {Antarctic Circumpolar Current, Eddy diffusivity, Isopycnal mixing, Mesoscale eddies},
}
@book{salmon_lectures_1998,
  author    = {Salmon, Rick},
  title     = {Lectures on {Geophysical} {Fluid} {Dynamics}},
  publisher = {Oxford University Press},
  month     = feb,
  year      = {1998},
  isbn      = {978-0-19-535532-1},
  language  = {en},
  abstract  = {Lectures on Geophysical Fluid Dynamics offers an introduction to several topics in geophysical fluid dynamics, including the theory of large-scale ocean circulation, geostrophic turbulence, and Hamiltonian fluid dynamics. Since each chapter is a self-contained introduction to its particular topic, the book will be useful to students and researchers in diverse scientific fields.},
  note      = {Google-Books-ID: r0Afu2k1bmQC},
  keywords  = {Science / Earth Sciences / Geology, Science / Earth Sciences / Hydrology, Science / Earth Sciences / Meteorology \& Climatology, Science / Earth Sciences / Oceanography},
}
@article{mcdougall_abyssal_2016,
title = {Abyssal {Upwelling} and {Downwelling} {Driven} by {Near}-{Boundary} {Mixing}},
volume = {47},
issn = {0022-3670},
url = {https://journals.ametsoc.org/doi/full/10.1175/JPO-D-16-0082.1},
doi = {10.1175/JPO-D-16-0082.1},
abstract = {A buoyancy and volume budget analysis of bottom-intensified mixing in the abyssal ocean reveals simple expressions for the strong upwelling in very thin continental boundary layers and the interior near-boundary downwelling in the stratified ocean interior. For a given amount of Antarctic Bottom Water that is upwelled through neutral density surfaces in the abyssal ocean (between 2000 and 5000 m), up to 5 times this volume flux is upwelled in narrow, turbulent, sloping bottom boundary layers, while up to 4 times the net upward volume transport of Bottom Water flows downward across isopycnals in the near-boundary stratified ocean interior. These ratios are a direct result of a buoyancy budget with respect to buoyancy surfaces, and these ratios are calculated from knowledge of the stratification in the abyss along with the assumed e-folding height that characterizes the decrease of the magnitude of the turbulent diapycnal buoyancy flux away from the seafloor. These strong diapycnal upward and downward volume transports are confined to a few hundred kilometers of the continental boundaries, with no appreciable diapycnal motion in the bulk of the interior ocean.},
number = {2},
urldate = {2019-08-12},
journal = {Journal of Physical Oceanography},
author = {McDougall, Trevor J. and Ferrari, Raffaele},
month = nov,
year = {2016},
pages = {261--283},
internal-note = {NOTE(review): volume 47(2), pp. 261-283 corresponds to the 2017 JPO issue, but year/month here say Nov 2016 (possibly the early-online date) -- verify against DOI 10.1175/JPO-D-16-0082.1},
}
@article{greatbatch_parameterizing_1990,
  author   = {Greatbatch, Richard J. and Lamb, Kevin G.},
  title    = {On {Parameterizing} {Vertical} {Mixing} of {Momentum} in {Non}-eddy {Resolving} {Ocean} {Models}},
  journal  = {Journal of Physical Oceanography},
  volume   = {20},
  number   = {10},
  pages    = {1634--1637},
  month    = oct,
  year     = {1990},
  doi      = {10.1175/1520-0485(1990)020<1634:OPVMOM>2.0.CO;2},
  issn     = {0022-3670},
  url      = {https://journals.ametsoc.org/doi/abs/10.1175/1520-0485%281990%29020%3C1634%3AOPVMOM%3E2.0.CO%3B2},
  urldate  = {2019-08-02},
  abstract = {We investigate the consequence, at small Ekman number, of adding vertical mixing of momentum terms to the incompressible thermocline equations. We find that choosing the vertical eddy viscosity, ν = Af2/N2, where f is the Coriolis parameter and N is the local value of the buoyancy frequency, leads to isopycnal mixing of fQ, where Q is the reciprocal of potential vorticity, provided A is independent of the vertical coordinate. If, additionally, A is also independent of the north–south coordinate, then on a beta-plane, this implies homogenization of potential vorticity, q, within closed q-contours on isopycnal surfaces. This conclusion extends to spherical geometry if ν is also inversely proportional to β, the gradient of f with respect to latitude, i.e. ν = Af2/(N2β). The connection with the recent work of Gent and McWilliams and the consequences for coarse resolution numerical model studies are discussed.},
}
@article{Thomas2015,
title = {A {Lagrangian} method to isolate the impacts of mixed layer subduction on the meridional overturning circulation in a numerical model},
volume = {28},
issn = {0894-8755},
doi = {10.1175/JCLI-D-14-00631.1},
abstract = {Large differences in the Atlantic meridional overturning circulation (AMOC) exhibited between the available ocean models pose problems as to how they can be interpreted for climate policy. A novel Lagrangian methodology has been developed for use with ocean models that enables a decomposition of the AMOC according to its source waters of subduction from the mixed layer of different geographical regions. The method is described here and used to decompose the AMOC of the Centre National de Recherches Météorologiques (CNRM) ocean model, which is approximately 4.5 Sv (1 Sv = $10^6$ m$^3$ s$^{-1}$) too weak at 26°N, compared to observations. Contributions from mixed layer subduction to the peak AMOC at 26°N in the model are dominated by the Labrador Sea, which contributes 7.51 Sv; but contributions from the Nordic seas, the Irminger Sea, and the Rockall basin are also important. These waters mostly originate where deep mixed layers border the topographic slopes of the Subpolar Gyre and Nordic seas. The too-weak m...},
number = {19},
journal = {Journal of Climate},
author = {Thomas, Matthew D. and Tr{\'e}guier, Anne Marie and Blanke, Bruno and Deshayes, Julie and Voldoire, Aurore},
year = {2015},
keywords = {Atm/Ocean Structure/Phenomena, Circulation/Dynamics, Convection, General circulation models, Lagrangian circulation/transport, Meridional overturning circulation, Models and modeling, Oceanic mixed layer},
pages = {7503--7517}
}
@article{stouffer_assessing_2017,
title = {Assessing temperature pattern projections made in 1989},
volume = {7},
issn = {1758-678X, 1758-6798},
url = {http://www.nature.com/articles/nclimate3224},
doi = {10.1038/nclimate3224},
language = {en},
number = {3},
urldate = {2019-01-02},
journal = {Nature Climate Change},
author = {Stouffer, Ronald J. and Manabe, Syukuro},
month = mar,
year = {2017},