diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000..e9f0bd7694
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,6 @@
+*.svg filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.pdf filter=lfs diff=lfs merge=lfs -text
diff --git a/.lfsconfig b/.lfsconfig
new file mode 100644
index 0000000000..3d2778fad3
--- /dev/null
+++ b/.lfsconfig
@@ -0,0 +1,2 @@
+[lfs]
+  fetchexclude = *
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000000..47304542de
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,42 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS  =
+SPHINXBUILD = sphinx-build
+SPHINXPROJ  = clmdoc
+SOURCEDIR   = source
+BUILDDIR    = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+# Before building html or pdf, obtain all of the images. This is needed
+# because we have configured this repository (via an .lfsconfig file at
+# the top level) to NOT automatically fetch any of the large files when
+# cloning / fetching.
+html: fetch-images
+latexpdf: fetch-images
+fetch-images:
+	git lfs pull --exclude=""
+
+.PHONY: help fetch-images Makefile
+
+# For the pdf, we only build the tech note, but use the conf.py file in
+# the top-level source directory (the -c option says where to find
+# conf.py). Note that we also override the setting of
+# numfig_secnum_depth in order to have figure numbering as desired in
+# the pdf, given that the pdf just contains the tech note, so doesn't
+# have the top-level numbering present in the web documentation (where
+# top-level section 1 is the User's Guide and section 2 is the Tech
+# Note).
+#
+# The use of $(O) is as in the catch-all target.
+latexpdf:
+	$(SPHINXBUILD) -M $@ "$(SOURCEDIR)/tech_note" "$(BUILDDIR)" -c "$(SOURCEDIR)" -D numfig_secnum_depth=1 $(SPHINXOPTS) $(O)
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/doc/UsersGuide/Makefile b/doc/UsersGuide/Makefile
deleted file mode 100644
index 1c76bc75bd..0000000000
--- a/doc/UsersGuide/Makefile
+++ /dev/null
@@ -1,193 +0,0 @@
-#
-# Makefile to convert DocBook CLM Users-Guide into html and/or pdf
-# (rtf, txt, ps, tex, man, dvi, and texi are also valid docbook formats)
-#
-VPATH := ../../tools/cprnc . ..
../../bld ../../tools/ncl_scripts \
- ../../tools/mksurfdata ../../test/system ../../bld/namelist_files \
- ../../bld/config_files ../../tools ../../../../../scripts/ccsm_utils/Tools \
- ../../../../../scripts/ccsm_utils/Tools/lnd/clm/PTCLM/ ../../src/main
-
-PDFUG := clm_ug.pdf
-HTMLUG := book1.html
-DOCBKUG := clm_ug.xml
-CFGLOG := config_help
-CPRLOG := cprnc_readme
-BNMLOG := buildnml_help
-BSTLOG := build_streams_help
-RESLOG := buildnml_resolutions
-USCLOG := buildnml_usecases
-QCKLOG := quickstart_guide
-COPLOG := filecopies
-MKSLOG := mksurfdata.pl
-USRLOG := quickstart_usrdat
-PTCLOG := ptclm_help
-PTCLST := ptclm_list
-TDRLOG := test_driver.sh
-GETREG := getregional_datasets
-CO2DIF := addco2_datm.buildnml
-DATLOG := build_date
-NMLDFTBL := namelist_definition_table
-NMLDLTBL := namelist_defaults_clm_table
-HSFLDTBL := history_fields_table
-CFGDFTBL := config_definition_table
-COMPLIST := compsets_list_ofIcases.xml
-SOURCES := $(DOCBKUG) $(COMPLIST) $(CFGLOG).xml $(PTCLOG).xml $(BNMLOG).xml \
- $(RESLOG).xml $(USCLOG).xml $(QCKLOG).xml $(COPLOG).xml $(PTCLST).xml \
- $(USRLOG).xml $(GETREG).xml preface.xml custom.xml special_cases.xml \
- tools.xml adding_files.xml single_point.xml addco2_datm.buildnml.xml \
- appendix.xml trouble_shooting.xml ptclm.xml $(BSTLOG).xml \
- $(MKSLOG).xml $(TDRLOG).xml $(DATLOG).xml $(CPRLOG).xml \
- $(NMLDFTBL).xml $(NMLDLTBL).xml $(CFGDFTBL).xml $(HSFLDTBL).xml
-
-CONVAMP := sed 's/\&/\&amp;/g'
-CONVSIGNS := sed 's/>/\&gt;/g' | sed 's/</\&lt;/g'
-
-$(NMLDFTBL).xhtml: namelist_definition.xml namelist_definition.xsl
-	xsltproc ../../bld/namelist_files/namelist_definition.xsl $< > tmpFile.txt
-	addxhtmlhead.pl tmpFile.txt > $@
-	$(RM) tmpFile.txt
-
-$(NMLDLTBL).xhtml: namelist_defaults_clm.xml namelist_defaults.xsl
-	xsltproc ../../bld/namelist_files/namelist_defaults.xsl $< > tmpFile.txt
-	addxhtmlhead.pl tmpFile.txt > $@
-	$(RM) tmpFile.txt
-
-$(CFGDFTBL).xhtml: config_definition.xml config_definition.xsl
-	xsltproc ../../bld/config_files/config_definition.xsl $< > tmpFile.txt
-	addxhtmlhead.pl tmpFile.txt > $@
-	$(RM) tmpFile.txt
-
-history_fields.xml: findHistFields.pl
-	cd ../../src/main; ./findHistFields.pl >& tmpFile.txt
-	$(RM) tmpFile.txt
-
-$(HSFLDTBL).xhtml: history_fields.xml history_fields.xsl
-	xsltproc ../../bld/namelist_files/history_fields.xsl $< > tmpFile.txt
-	addxhtmlhead.pl tmpFile.txt > $@
-	$(RM) tmpFile.txt
-
-.xhtml.xml:
-	xsltproc stylesheethtml2docbook.xsl $< > $@
-
-.tlog.xml:
-	$(CONVAMP) $< | $(CONVSIGNS) > tempFile.txt
-	limitLineLen.pl tempFile.txt > $@
-	$(RM) tempFile.txt
-
-.diff.xml:
-	$(CONVAMP) $< | $(CONVSIGNS) > $@
-
-debug:
-	@echo "SOURCES: $(SOURCES)"
-	@echo "CONVAMP: $(CONVAMP)"
-	@echo "CONVSIGNS: $(CONVSIGNS)"
-
-$(COMPLIST):
-	./get_Icaselist.pl > $@
-
-$(HTMLUG): $(SOURCES)
-	docbook2html --dsl clm_stylesheet.dsl#html $<
-
-$(PDFUG): $(SOURCES)
-	docbook2pdf --dsl clm_stylesheet.dsl#print $<
-
-$(BNMLOG).tlog: build-namelist
-	@echo "The following line will fail in the make as it calls die -- but that is expected"
-	@echo "Check that the output $@ is good and redo your make"
-	../../bld/build-namelist -help >& $@
-
-$(BSTLOG).tlog: build_streams
-	@echo "The following line will fail in the make as it calls die -- but that is expected"
-	@echo "Check that the output $@ is good and redo your make"
-	../../../../../scripts/ccsm_utils/Tools/build_streams --help >& $@
-
-$(DATLOG).tlog:
-	@echo "Get current build date"
-	date +%b-%d-%Y >& $@
-
-$(RESLOG).tlog: build-namelist
-	@echo "The following line will fail in the make as it calls die -- but that is expected"
-	@echo "Check that the output $@ is good and redo your make"
-	../../bld/build-namelist -res
list >& $@ - -$(USCLOG).tlog: build-namelist - @echo "The following line will fail in the make as it calls die -- but that is expected" - @echo "Check that the output $@ is good and redo your make" - ../../bld/build-namelist -use_case list >& $@ - -$(CFGLOG).tlog: configure - @echo "The following line will fail in the make as it calls die -- but that is expected" - @echo "Check that the output $@ is good and redo your make" - ../../bld/configure -help >& $@ - -$(PTCLOG).tlog: PTCLM.py - @echo "The following line will fail in the make as it calls die -- but that is expected" - @echo "Check that the output $@ is good and redo your make" - ../../../../../scripts/ccsm_utils/Tools/lnd/clm/PTCLM/PTCLM.py --help >& $@ - -$(PTCLST).tlog: PTCLM.py - @echo "The following line will fail in the make as it calls die -- but that is expected" - @echo "Check that the output $@ is good and redo your make" - cd ../../../../../scripts/ccsm_utils/Tools/lnd/clm/PTCLM; \ - PTCLM.py --list >& $(CURDIR)/$@ - -$(MKSLOG).tlog: mksurfdata.pl - @echo "The following line will fail in the make as it calls die -- but that is expected" - @echo "Check that the output $@ is good and redo your make" - ../../tools/mksurfdata/mksurfdata.pl -help >& $@ - -$(TDRLOG).tlog: test_driver.sh - @echo "The following line will fail in the make as it calls die -- but that is expected" - @echo "Check that the output $@ is good and redo your make" - ../../test/system/test_driver.sh -help >& $@ - -$(QCKLOG).tlog: Quickstart.GUIDE - cp $< $@ - -$(COPLOG).tlog: README.filecopies - cp $< $@ - -$(CPRLOG).tlog: README - cp $< $@ - -$(USRLOG).tlog: Quickstart.userdatasets - cp $< $@ - -$(GETREG).tlog: getregional_datasets.pl - @echo "The following line will fail in the make as it calls die -- but that is expected" - @echo "Check that the output $(GETREG) is good and redo your make" - ../../tools/ncl_scripts/getregional_datasets.pl -help >& $@ - -clean: - $(RM) -f $(HTMLUG) $(PDFUG) *.tlog $(DATLOG).xml *.xhtml *.tex - -realclean: clean - $(RM) -f f*.html c*.html x*.html a*.html i*.html $(COMPLIST) $(CFGLOG).xml \ - $(BNMLOG).xml $(BSTLOG).xml $(PTCLOG).xml $(PTCLST).xml \ - $(RESLOG).xml $(USCLOG).xml $(USRLOG).xml $(GETREG).xml $(QCKLOG).xml \ - $(CO2DIF).xml *.tlog $(MKSLOG).xml $(TDRLOG).xml $(DATLOG).xml \ - $(NMLDFTBL).xml $(NMLDLTBL).xml $(CFGDFTBL).xml $(CPRLOG).xml \ - $(COPLOG).xml diff --git a/doc/UsersGuide/addco2_datm.buildnml.diff b/doc/UsersGuide/addco2_datm.buildnml.diff deleted file mode 100644 index b8fbf34e36..0000000000 --- a/doc/UsersGuide/addco2_datm.buildnml.diff +++ /dev/null @@ -1,59 +0,0 @@ -*** datm.buildnml.csh.orig 2010-06-11 10:59:29.246523532 -0600 ---- datm.buildnml.csh 2010-06-11 11:06:30.710784206 -0600 -*************** -*** 34,48 **** - streams = 'clm_qian.T62.stream.Solar.txt 1895 1948 1972 ', - 'clm_qian.T62.stream.Precip.txt 1895 1948 1972 ', - 'clm_qian.T62.stream.TPQW.txt 1895 1948 1972 ', -! 'presaero.stream.txt 1849 1849 2006' - vectors = 'null' - mapmask = 'nomask', - 'nomask', - 'nomask', - 'nomask' - tintalgo = 'coszen', - 'nearest', - 'linear', - 'linear' - / - EOF1 ---- 34,56 ---- - streams = 'clm_qian.T62.stream.Solar.txt 1895 1948 1972 ', - 'clm_qian.T62.stream.Precip.txt 1895 1948 1972 ', - 'clm_qian.T62.stream.TPQW.txt 1895 1948 1972 ', -! 'presaero.stream.txt 1849 1849 2006', -! 
'datm.global1val.stream.CO2.txt 1766 1766 2005 ' - vectors = 'null' - mapmask = 'nomask', - 'nomask', - 'nomask', -+ 'nomask', - 'nomask' -+ mapalgo = 'bilinear', -+ 'bilinear', -+ 'bilinear', -+ 'bilinear', -+ 'nn' - tintalgo = 'coszen', - 'nearest', - 'linear', -+ 'linear', - 'linear' - / - EOF1 -*************** -*** 1112,1121 **** ---- 1120,1132 ---- - - EOF1 - -+ cp $CASEBUILD/co2_streams.txt datm.global1val.stream.CO2.txt -+ - - $CASETOOLS/listfilesin_streams -input_data_list -t clm_qian.T62.stream.Solar.txt >> $CASEBUILD/datm.input_data_list - $CASETOOLS/listfilesin_streams -input_data_list -t clm_qian.T62.stream.Precip.txt >> $CASEBUILD/datm.input_data_list - $CASETOOLS/listfilesin_streams -input_data_list -t clm_qian.T62.stream.TPQW.txt >> $CASEBUILD/datm.input_data_list -+ $CASETOOLS/listfilesin_streams -input_data_list -t datm.global1val.stream.CO2.txt >> $CASEBUILD/datm.input_data_list - - cat >! presaero.stream.txt << EOF1 - diff --git a/doc/UsersGuide/adding_files.xml b/doc/UsersGuide/adding_files.xml deleted file mode 100644 index 18c034cbbb..0000000000 --- a/doc/UsersGuide/adding_files.xml +++ /dev/null @@ -1,357 +0,0 @@ - - -Adding New Resolutions or New Files to the build-namelist Database - -In the last chapter we gave the details on how to create new files for input into -CLM. These files could be either global resolutions, regional-grids or even a single -grid point. If you want to easily have these files available for continued use in your -development you will then want to include them in the build-namelist database so -that build-namelist can easily find them for you. You can deal with them, just by -editing your namelist by hand (or using a &usernlclm; namelist file), or by using -&CLMUSRDAT;. Another way to deal with them is to enter them into -the database for build-namelist, so that build-namelist can find them for you. -This keeps one central database for all your files, rather than having multiple locations -to keep track of files. If you have a LOT of files to keep track of it also might -be easier than keeping track by hand, especially if you have to periodically update -your files. If you just have a few quick experiments to try, for a short time period -you might be best off using the other methods mentioned above. - - -There are two parts to adding files to the build-namelist database. The first part -is adding new resolution names which is done in the -models/lnd/clm/bld/namelist_files/namelist_definition.xml file -(and in the -models/lnd/clm/bld/config_files/config_definition.xml file -when adding supported single-point datasets). -The second part is actually adding the new filenames which is done in the -models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml file -(models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml -file for CLM tools). -If you aren't adding any new resolutions, and you are just changing the files for existing -resolutions, you don't need to edit the namelist_definition file. - - - -Managing Your Own Data-files - -If you are running on a supported machine (such as bluefire or jaguar) the standard -input datasets will already be available and you won't have to check them out of the -subversion inputdata server. However, you also will NOT be able to add your own datafiles -to these standard inputdata directories -- because most likely you won't have permissions -to do so. In order to add files to the XML database or to use &CLMUSRDAT; you need -to put data in the standard locations so that they can be found. 
The recommended
-way to do this is to use the link_dirtree tool in the &cesm; scripts.
-Some information on link_dirtree is available in the
-&cesmrel; Scripts User's Guide. We also have
-some examples of its use here and in other sections of this User's Guide.
-
-
-Using link_dirtree is quite simple: you give the directory where
-data exists and then the directory that you want to create, where datasets will point
-to the original source files. In the example below we use "$HOME/inputdata", but
-MYCSMDATA could be any directory you have access to where you want to
-put your data.
-
-> cd scripts
-# First make sure you have an inputdata location that you can write to
-# You only need to do this step once, so you won't need to do this in the future
-# (except to bring in any updated files in the original $CSMDATA location).
-> setenv MYCSMDATA $HOME/inputdata # Set env var for the directory for input data
-> ./link_dirtree $CSMDATA $MYCSMDATA
-
-Then when you create a case you will change DIN_LOC_ROOT_CSMDATA to
-point to the location you linked to rather than the default location.
-
-> ./xmlchange -file env_run.xml -id DIN_LOC_ROOT_CSMDATA -val $MYCSMDATA
-
-
-In order to list the files that you have created you merely need to use the UNIX
-command find to find the files that are NOT softlinks. So for
-example executing the following command:
-
-> find $MYCSMDATA -type f -print
-
-for me gives the following list of &CLMUSRDAT; files that I have created.
-
-/blhome/erik/inputdata/atm/cam/chem/trop_mozart_aero/aero/aerosoldep_monthly_1849-2006_1x1pt_US-Ha1.nc
-/blhome/erik/inputdata/atm/cam/chem/trop_mozart_aero/aero/aerosoldep_monthly_1849-2006_13x12pt_f19_alaskaUSA.nc
-/blhome/erik/inputdata/atm/cam/chem/trop_mozart_aero/aero/aerosoldep_rcp8.5_monthly_1850-2100_13x12pt_f19_alaskaUSA.nc
-/blhome/erik/inputdata/atm/cam/chem/trop_mozart_aero/aero/aerosoldep_rcp4.5_monthly_1850-2100_13x12pt_f19_alaskaUSA.nc
-/blhome/erik/inputdata/atm/datm7/domain.clm/domain.lnd.1x1pt_US-Ha1_USGS.nc
-/blhome/erik/inputdata/atm/datm7/domain.clm/domain.lnd.13x12pt_f19_alaskaUSA_gx1v6.nc
-/blhome/erik/inputdata/lnd/clm2/griddata/fracdata_13x12pt_f19_alaskaUSA_gx1v6.nc
-/blhome/erik/inputdata/lnd/clm2/griddata/fracdata_1x1pt_US-Ha1_USGS.nc
-/blhome/erik/inputdata/lnd/clm2/griddata/topodata_13x12pt_f19_alaskaUSA.nc
-/blhome/erik/inputdata/lnd/clm2/griddata/griddata_1x1pt_US-Ha1.nc
-/blhome/erik/inputdata/lnd/clm2/griddata/griddata_13x12pt_f19_alaskaUSA.nc
-/blhome/erik/inputdata/lnd/clm2/surfdata/surfdata_13x12pt_f19_alaskaUSA_simyr1850.nc
-/blhome/erik/inputdata/lnd/clm2/surfdata/surfdata_1x1pt_US-Ha1_simyr2000.nc
-/blhome/erik/inputdata/lnd/clm2/surfdata/surfdata.pftdyn_rcp4.5_13x12pt_f19_alaskaUSA_simyr1850-2100.nc
-/blhome/erik/inputdata/lnd/clm2/surfdata/surfdata_1x1pt_US-Ha1_simyr1850.nc
-/blhome/erik/inputdata/lnd/clm2/surfdata/surfdata_13x12pt_f19_alaskaUSA_simyr2000.nc
-/blhome/erik/inputdata/lnd/clm2/surfdata/surfdata.pftdyn_1x1pt_US-Ha1_simyr1849-2006.nc
-/blhome/erik/inputdata/lnd/clm2/surfdata/surfdata.pftdyn_13x12pt_f19_alaskaUSA_simyr1850-2100.nc
-/blhome/erik/inputdata/lnd/clm2/surfdata/surfdata.pftdyn_rcp8.5_13x12pt_f19_alaskaUSA_simyr1850-2100.nc
-/blhome/erik/inputdata/lnd/clm2/surfdata/surfdata.pftdyn_13x12pt_f19_alaskaUSA_simyr1849-2006.nc
-/blhome/erik/inputdata/lnd/clm2/surfdata/surfdata.pftdyn_1x1pt_US-Ha1_simyr1850-2100.nc
-
-You can also use find to list files that have a particular pattern
-in the name as well (using the -name option with wildcards).
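-
-For example, to list just the surface datasets in your directory tree, you could
-combine -type with a -name wildcard pattern (the pattern shown is only an
-illustration -- adjust it to match your own filenames):
-
-> find $MYCSMDATA -type f -name "surfdata*" -print
-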
Also you can always rerun the
-link_dirtree command if any new files are added that you need to be
-linked into your directory tree. Since the files are soft-links, they don't take up
-much space other than the files that you add there. This way all of the files are kept
-in one place; they are organized by usage according to &cesm; standards, and you can
-easily find your own files, and &clm; can find them as well.
-
-
-
-Adding Resolution Names
-
-If you are adding files for new resolutions which aren't covered in the
-namelist_definition file -- you'll need to add them in. The list of valid resolutions
-is in the id="res" entry in the
-models/lnd/clm/bld/namelist_files/namelist_definition.xml file.
-You need to choose a name for your new resolution and simply add it to the
-comma-delimited
-list of valid_values for the id="res" entry. The convention for global Gaussian grids
-is number_of_latitudes x number_of_longitudes. The convention for global finite
-volume grids is latitude_grid_size x longitude_grid_size where latitude and longitude
-are measured in degrees. For regional or single-point datasets the names have a grid size
-number_of_latitudes x number_of_longitudes followed by an underscore and then a
-descriptive name such as a city name followed by an abbreviation for the country in caps.
-The only hard requirement is that names be unique for different grid files.
-Here's what the entry for resolutions looks like in the file:
-
-<entry id="res" type="char*30" category="default_settings"
- group="default_settings"
- valid_values=
-"128x256,64x128,48x96,32x64,8x16,94x192,0.23x0.31,0.47x0.63,
-0.9x1.25,1.9x2.5,2.65x3.33,4x5,10x15,5x5_amazon,
-1x1_camdenNJ,1x1_vancouverCAN,1x1_mexicocityMEX,1x1_asphaltjungleNJ,
-1x1_brazil,1x1_urbanc_alpha,0.5x0.5">
-Horizontal resolutions
-</entry>
-
-As you can see you just add your new resolution names to the end of the valid_values
-list.
-
-When using &ptclm; and adding supported single-point resolutions, you'll also want to
-add these resolutions to the
-models/lnd/clm/bld/config_files/config_definition.xml under
-the sitespf_pt name. The entry in that file looks like:
-
-<entry id="sitespf_pt"
-valid_values="none,1x1_brazil,5x5_amazon,
-1x1_camdenNJ,1x1_vancouverCAN,1x1_mexicocityMEX,1x1_asphaltjungleNJ,
-1x1_urbanc_alpha,1x1_numaIA,1x1_smallvilleIA"
-value="none" category="physics">
-Flag to turn on site specific special configuration flags for supported single
-point resolutions.
-Currently the only special settings are for MEXICOCITY and VANCOUVER, which make
-changes to urban parameters.
-</entry>
-
-&ptclm; assumes that any supported single-point resolutions are valid settings for
-sitespf_pt.
-
-
-
-Adding or Changing Default Filenames
-
-To add or change the default filenames you edit the
-models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml
-file and either change an existing filename or add a new one. Most entries in the
-default namelist files include different attributes that describe the
-properties that distinguish the datasets. Attributes include:
-the resolution, the year to simulate, the range of years to simulate for transient
-datafiles, the land-mask, the representative concentration pathway (rcp) for future
-scenarios, and the type of biogeochemistry (bgc) model used.
For example the -fatmgrid for the 1.9x2.5 resolution is as follows: - -<fatmgrid hgrid="1.9x2.5" >lnd/clm2/griddata/griddata_1.9x2.5_060404.nc -</fatmgrid> - -Other fatmgrid files are distinguished from this one by -their resolution (hgrid) attribute. - - -To add or change the default filenames for &clm; tools edit the -models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml -and either change an existing filename or add a new one. Editing this file is -similar to the namelist_defaults_clm.xml talked about above. - - -What are the required files? - -Different types of simulations and different types of configurations for &clm; require -different lists of files. The Carbon Nitrogen (cn) Biogeochemistry model for example -requires stream_fldfilename_ndep files, which are NOT required by -other bgc modes. Transient simulations also require transient datasets, and the names -of these datasets are sometimes different from the static versions (sometimes both are -required as in the dynamic PFT cases). - - -In the following table we list the different files used by CLM, they are listed -in order of importance, dependencies, and customizing. So the required files -are all near the top, and the files used only under different conditions are listed -later, and files with the fewest dependencies are near the top, as are the files -that are least likely to be customized. - - -Required Files for Different Configurations and Simulation Types - - - - - - - Filename - Config. type - Simulation type - Resol. Dependent? - Other Dependencies? - - - Notes - - - - - - fpftcon - ALL - ALL - No - No - - - Not usually customized, as describes plant function -type properties. &ptclm; copies the file for you so that you can customize it if you -like, see . - - - fsnowoptics - ALL - ALL - No - No - - - Not usually customized as describes global snow optical properties. - - - fsnowaging - ALL - ALL - No - No - - - Not usually customized as describes global snow aging properties. - - - fatmgrid - ALL - ALL - Yes - No - - - Creating, using mkgriddata -usually gives you the amount of customization -you need, as it just describes the grid and grid extents. - - - fatmlndfrc - ALL - ALL - Yes - land-mask - - - Describes the land-mask for points with active land, as well as the fraction -of each grid-cell covered by land. You might customize it to make sure the land-fraction -of your grid-cell matches the expected values for your site. But, usually you will just -use what mkgriddata gives you. - - - fsurdat - ALL - ALL - Yes - simulation-year - - - Describes percentages of different land-units, columns and -vegetation types within each grid-cell. To customize for a specific point -or region you may want to use custom input datasets to mksurfdata when -creating the file. mksurfdata also allows you to customize the PFT, -and soil types to it see . &ptclm; takes -advantage of this to create customized datasets as well, see the chapter on &ptclm; -at . - - - flanduse_timeseries - ALL - transient land-use land-cover change - Yes - Simulation year range, and representative concentration pathway (rcp) - - - See notes on fsurdat files. - - - frivinp_rtm - RTM only - ALL - No - No - - - We only provide a half-degree global river routing file. If you want -to model river flow for a smaller scale, or a basin regional scale, you would -need to create your own custom file to do that. Normally, we turn river-routing -OFF for regional or single point simulations. 
- - - finidat - ALL - RUN_TYPE="startup", CLM_FORCE_COLDSTART="off" - Yes - mask, maxpft, bgc, simulation-year, start-date - - - Used for starting the model from a spun-up state. -Create these files by running the model -for multiple years and saving the restart file from the end of a spin-up -simulation. - - - - stream_fldfilename_ndep - bgc=cn/cndv - Yes - No - simulation-year - - - -You may customize this file to get the Nitrogen deposition characteristics -of your site if available. This file will be interpolated while the model is -running from it's resolution to the resolution that &clm; is running at. - - - - - -
- diff --git a/doc/UsersGuide/addxhtmlhead.pl b/doc/UsersGuide/addxhtmlhead.pl deleted file mode 100755 index d5f9615c58..0000000000 --- a/doc/UsersGuide/addxhtmlhead.pl +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env perl -# -use strict; -use Cwd; -use English; -use IO::File; -use Getopt::Long; -use IO::Handle; -#----------------------------------------------------------------------------------------------- - -# Get the directory name and filename of this script. If the command was -# issued using a relative or absolute path, that path is in $ProgDir. Otherwise assume -# the -# command was issued from the current working directory. - -(my $ProgName = $0) =~ s!(.*)/!!; # name of this script -my $ProgDir = $1; # name of directory containing this script -- may be a - # relative or absolute path, or null if the script - # is in - # the user's PATH -my $nm = "$ProgName::"; # name to use if script dies -my $scrdir; -if ($ProgDir) { - $scrdir = $ProgDir; -} else { - $scrdir = getcwd() -} - -sub usage { - my $msg = shift; - - print "ERROR:: $msg\n"; - die < -OPTIONS - NONE -EOF -} - -my %opts = ( ); - -GetOptions( -) or usage(); - -if ( $#ARGV != 0 ) { - &usage( "Wrong number of command line arguments" ); -} - -my $inputFile = $ARGV[0]; - -if ( ! -f $inputFile ) { - &usage( "Input file does NOT exist : $inputFile" ); -} - -my $fh = IO::File->new($inputFile, '<') or die "** $nm - can't open input file: -$inputFile\n"; - -# -# Add in XML XHTML headers -# -print <<"EOF"; - - - -EOF -while (my $line = <$fh>) { - if ( $line =~ /^$/ ) { - print "
\n"; - } elsif ( $line =~ /^'."\n"; - } else { - print $line; - } -} -$fh->close(); diff --git a/doc/UsersGuide/appendix.xml b/doc/UsersGuide/appendix.xml deleted file mode 100644 index 058e67d577..0000000000 --- a/doc/UsersGuide/appendix.xml +++ /dev/null @@ -1,305 +0,0 @@ - -Editing Template Files Before Configure - -The last kind of customization that you can do for a case, before configure is run -is to edit the templates. The &clm; template is in -models/lnd/clm/bld/clm.cpl7.template, the &datm; template is -in models/atm/datm/bld/datm.cpl7.template, and the driver templates -are in the models/drv/bld directory and are named: -ccsm.template and cpl.template. When a case is -created they are also copied to the Tools/Templates directory -underneath your case. If you want to make changes that will impact all your cases, you -should edit the template files under the models directory, but -if you want to make a change ONLY for a particular case you should edit the template -under that specific case. - - - -Editing the template files is NOT for the faint of heart! We recommend this ONLY for -experts! It's difficult to do because the template is a script that actually creates -another script. So part of the script is echoing the script to be created and part of -it is a script that is run when "configure -case" is run. As a result any variables -in the part of the script that is being echoed have to be escaped like this: - -\$VARIABLE - -But, in other parts of the script that is run, you can NOT escape variables. So you -need to understand if you are in a part of the script that is echoing the script to -be created, or in the part of the script that is actually run. - - - -If you can customize your case using: compsets, env_*.xml variables, -or a user namelist, as outlined in you should do so. -The main reason to actually edit the template files, is if you are in a situation where -the template aborts when you try it run it when "configure -case" is run. The other -reason to edit the template is if you are &clm; developer and need to make adjustments -to the template because of code or script updates. An example of modifying the &datm; -template is in where sed is used to modify the path -for &CPLHIST; data. - - -Outline of the &clm; template - -The outline of the &clm; template is as follows: - -# set up options for clm configure and then run clm configure -$CODEROOT/lnd/clm*/bld/configure <options> -# set up options for clm build-namelist and then run clm build-namelist -$CODEROOT/lnd/clm*/bld/build-namelist <options> -# echo the $CASEBUILD/clm.buildnml.csh script out -cat >! $CASEBUILD/clm.buildnml.csh << EOF1 -# NOTE: variables in this section must be escaped -EOF1 -# Remove temporary namelist files - -# echo the $CASEBUILD/clm.buildexe.csh script out -cat > $CASEBUILD/clm.buildexe.csh <<EOF2 -# NOTE: variables in this section must be escaped -EOF2 -# Remove temporary configure files - - - - - -Outline of the &datm; template - -The outline of the &datm; template is as follows: - -# Check $GRID to set the $DOMAINFILE and $DOMAINPATH - -# Check DATM_PRESAERO to set the prescribed aerosol option -# If &CLMUSRDAT; is set and $DOMAINFILE is NOT -- set it by &CLMUSRDAT; -# Ensure $DOMAINFILE is set or else abort - -#============================================================================== -# Create resolved prestage data script -#============================================================================== -cat >! 
$CASEBUILD/datm.buildnml.csh << EOF1 -# NOTE: variables in this section must be escaped -EOF1 -# Major if blocks look at DATM_MODE: -# the if blocks setup streams and run Tools/build_streams to create stream files -#----- CLM_QIAN mode ---------------------------------------------------------- -else if ($DATM_MODE == "&CLMQIAN;" ) then -. - # Customize &CLMQIAN; options here - - # A.) Setup datm_atm_in namelist -cat >! $CASEBUILD/datm.buildnml.csh << EOF -cat >! datm_atm_in << EOF1 -# NOTE: variables in this section must be escaped -EOF1 -EOF - - # B.) Setup options to build_streams -. -. -. -#----- CLM1PT mode ---------------------------------------------------------- -else if ($DATM_MODE == "CLM1PT" ) then -. - # Customize CLM1PT options here - - # A.) Setup datm_atm_in namelist -cat >! $CASEBUILD/datm.buildnml.csh << EOF -cat >! datm_atm_in << EOF1 -# NOTE: variables in this section must be escaped -EOF1 -EOF - - # B.) Setup options to build_streams -. -. -. -. -#----- CPLHIST 3-hourly time-averaging mode ----------------------------------------------------------- -else if ($DATM_MODE == "&CPLHIST;" ) then -. - # Customize &CPLHIST; options here - - # A.) Setup datm_atm_in namelist -cat >! $CASEBUILD/datm.buildnml.csh << EOF -cat >! datm_atm_in << EOF1 -# NOTE: variables in this section must be escaped -EOF1 -EOF - - # B.) Setup options to build_streams -. -. -. -. - -#----- INVALID mode -----------------------------------------------------------else - echo "ERROR: unrecognized DATM_MODE = \$DATM_MODE " - exit -1 -endif - -#============================================================================== -# Create prescribed aero streams if appropriate -#============================================================================== -. -. -. -#============================================================================== -# Create remaining resolved namelist -#============================================================================== - -cat >! $CASEBUILD/datm.buildnml.csh << EOF -cat >! datm_in << EOF1 -# NOTE: variables in this section must be escaped -. -. -. -EOF1 - -EOF - -#============================================================================== -# Create script to build executable -#============================================================================== - -cat > $CASEBUILD/datm.buildexe.csh <<EOF -#! /bin/csh -f -# NOTE: variables in this section must be escaped -EOF - -#============================================================================== -# end of script -#============================================================================== - - - - - -Adding a new DATM_MODE to the &datm; template - - The steps to adding a new DATM_MODE - -Add a new "if" block to the &datm; template - -As you can see from above -there are major "if" blocks for the different DATM_MODE's. So adding a new -DATM_MODE means adding a new "if" block. The two major parts of each DATM_MODE -block are: - -Setup datm_atm_in namelist -Setup options to build_streams - - - - -In the "if" block create the <filename>datm_atm_in</filename> namelist - -See for some notes about the -&datm; namelist and streams files. That and the - -&datm; User's Guide should give you guidance on how to -setup the namelist for your case. - - - -In the "if" block create options to and call <command>build_streams</command> - -The next part of the "if" block in the &datm; template file to work with is the -call to build_streams. You may need to add additional options -to it. 
You may also need to call it multiple times for multiple streams. You will -also likely need to add a new source option to it with the "-s" option. For more -information on build_streams do the following. - -Getting help with <command>build_streams</command> for &datm; - -> scripts/ccsm_utils/Tools/build_streams -help - - -The output of the above command is: - - -&build_streams_help; - - - - - -Add new streams templates to the &datm; -<filename>datm.template.streams.xml</filename> file - -As part of modifying the behavior of build_streams you will also -have to edit the models/atm/datm7/bld/datm.template.streams.xml -file as well (or the local version in your -$CASENAME/Tools/Templates directory for a particular case). -The template is an XML file much like the output streams file, but there are attributes -to distinguish which fields will be used based on things like: RESOLUTION or datasource. -And there are filename indicators (starting with a "%") that get translated into various -things such as: - -%c = Case (from above -case command line option) -%do = Use domain file -%y = Year (through range given from begyear to endyear) -%ym = Year-Month (all 12 months through year range) -%6ym = Like %ym but 6 digit year (ie. %YYYYYY-MM). (can replace the 6 with any digit 1-9) - - - - -Add a new valid_value to the <filename>config_definition.xml</filename> file in -scripts. - -Adding a new DATM_MODE also requires adding a new valid_value to -scripts/ccsm_utils/Case.template/config_definition.xml. This -enables the scripts to recognize the new value as a valid option to DATM_MODE -in the &envconf; file. - - - - - - - -Building the Users-Guide Documentation for &clm; - -All of the documentation for &clm; can be built using GNU Makefiles that are -available in the appropriate directories. The Makefiles require the following -utilities: docbook2html, docbook2pdf, -protex, and latex2html. - - -To build the Users Guide for &clm; (requires docbook). - -> cd models/lnd/clm/doc/UsersGuide -> gmake - -Note, that when the Users-Guide is built it will get output from other &clm; -utilities that by nature abort, and hence stop the make from continuing. However, -this is expected so you should simply run gmake again until -it either completes or comes upon a legitimate issue. Here is what a sample -warning looks like when gmake is run. - -The following line will fail in the make as it calls die -- but that is expected -Check that the output config_help.tlog is good and redo your make -../../bld/configure -help >&` config_help.tlog -make: *** [config_help.tlog] Error 255 - -To build the Code Reference Guide for &clm; (requires protex and -latex2html). The make here uses a Filepath -file that points to the list of directories that you want protex -to run over. You should examine this file and make sure it is appropriate for what -you need to do, before running the make. - -> cd models/lnd/clm/doc/CodeReference -> gmake - -To build the table of tests for the &clm; test suite. The make here runs a UNIX -shell script to create a html table of the list of tests run on the different machines -from the &clm; test suite. 
- -> cd models/lnd/clm/test/system -> gmake - - - - - diff --git a/doc/UsersGuide/badpergro.jpg b/doc/UsersGuide/badpergro.jpg deleted file mode 100644 index 4a378cf52d..0000000000 Binary files a/doc/UsersGuide/badpergro.jpg and /dev/null differ diff --git a/doc/UsersGuide/clm_stylesheet.dsl b/doc/UsersGuide/clm_stylesheet.dsl deleted file mode 100644 index 7bc3ed0036..0000000000 --- a/doc/UsersGuide/clm_stylesheet.dsl +++ /dev/null @@ -1,154 +0,0 @@ - - -]> - - - - - - - - - -;;Default extension for filenames -(define %html-ext% ".html") -;;What font would you like for the body? -(define %body-font-family% - "Arial") - -(element emphasis -(if (equal? (normalize "bold") (attribute-string (normalize "role"))) - ($bold-seq$) - ($italic-seq$))) - -(element tgroup - (let* ((wrapper (parent (current-node))) - (frameattr (attribute-string (normalize "frame") wrapper)) - (pgwide (attribute-string (normalize "pgwide") wrapper)) - (footnotes (select-elements (descendants (current-node)) - (normalize "footnote"))) - (border (if (equal? frameattr (normalize "none")) - '(("BORDER" "0")) - '(("BORDER" "1")))) - (bgcolor '(("BGCOLOR" "#E0E0E0"))) - (width (if (equal? pgwide "1") - (list (list "WIDTH" ($table-width$))) - '())) - (head (select-elements (children (current-node)) (normalize "thead"))) - (body (select-elements (children (current-node)) (normalize "tbody"))) - (feet (select-elements (children (current-node)) (normalize "tfoot")))) - (make element gi: "TABLE" - attributes: (append - border - width - bgcolor - '(("CELLSPACING" "0")) - '(("CELLPADDING" "4")) - (if %cals-table-class% - (list (list "CLASS" %cals-table-class%)) - '())) - (process-node-list head) - (process-node-list body) - (process-node-list feet) - (make-table-endnotes)))) - - -;;Should verbatim items be 'shaded' with a table? -(define %shade-verbatim% - #t) - -;;Define shade-verbatim attributes -(define ($shade-verbatim-attr$) - (list - (list "BORDER" "0") - (list "BGCOLOR" "#E0E0E0") - (list "WIDTH" ($table-width$)))) - -;;Index -(define (generate-index) - ("1")) - -;;======================== -;;Title Pages for Books -;;======================= - -(define (book-titlepage-recto-elements) - (list (normalize "title") - (normalize "subtitle") - (normalize "authorgroup") - (normalize "author") - (normalize "date") - (normalize "releaseinfo") - (normalize "orgname") - (normalize "graphic") - (normalize "copyright") - (normalize "legalnotice"))) - - - - - - - - - -;;Index -(define ($insert.xref.page.number$) - ("yes")) - -;;Index -(define ($generate-index$) - ("1")) - -;;Tex Backend off -(define tex-backend - #f) - -;;What elements should have a LOT? -(define ($generate-book-lot-list$) - (list (normalize "example") - (normalize "table") - (normalize "figure") - (normalize "equation"))) - -;;======================== -;;Title Pages for Books -;;======================= - -(define (book-titlepage-recto-elements) - (list (normalize "title") - (normalize "subtitle") - (normalize "authorgroup") - (normalize "author") - (normalize "date") - (normalize "orgname") - (normalize "graphic") - (normalize "copyright") - (normalize "legalnotice") - (normalize "releaseinfo"))) - -(define %show-ulinks% - ;; Display URLs after ULinks? - #t) - -(define %indent-screen-lines% - ;; Indent lines in a 'Screen'? 
- " ") - - - - - - - - diff --git a/doc/UsersGuide/clm_ug.xml b/doc/UsersGuide/clm_ug.xml deleted file mode 100644 index 68aa69b1d6..0000000000 --- a/doc/UsersGuide/clm_ug.xml +++ /dev/null @@ -1,191 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - build-namelist"> - configure"> - NCAR"> - CCSM"> - CCSM4.0"> - CESM"> - CESM1.0"> - CESM1.0.1"> - CESM1.0.2"> - CESM1.0.3"> - - PTCLM"> - PTCLM1"> - PTCLM1.110504"> - CLM"> - CLMCN"> - CLMSP"> - CLMU"> - CLM3.0"> - CLM3.5"> - CLM4"> - CLM4.0.00"> - - - - - - - - DATM"> - models/lnd/clm/doc/KnownBugs"> - NetCDF"> - FORTRAN"> - FORTRAN-90"> - MPI"> - PIO"> - OpenMP"> - NCL"> - Perl"> - XML"> - xmlchange"> - 2"> - - - - env_run.xml"> - env_build.xml"> - env_conf.xml"> - user_nl_clm"> - - - PTS_MODE"> - CSMDATA"> - CLM_FORCE_COLDSTART"> - CLM_CONFIG_OPTS"> - CLM_BLDNML_OPTS"> - CLM_NML_USE_CASE"> - CLM_NAMELIST_OPTS"> - CLM_PT1_NAME"> - CLM_QIAN"> - CPLHIST3HrWx"> - CLM_USRDAT_NAME"> - CLM_CO2_TYPE"> - DIN_LOC_ROOT"> - - - CLM_QIAN"> - - - - %ISOamsa; - ]]> - - - - %ISOgrk1; - ]]> - -]> - - - - -&cesm; Research Tools: &clmrel; User's Guide Documentation - - - CESM - CLM - community earth system model - climate - climate model - earth system model - land surface model - hydrology - biogeochemistry - urban model - documentation - - - - - - Erik - Kluzek - - NCAR - - - - - - - -The user's guide to &clmrel; which is the active land surface model component of &cesmrel;. -The purpose of this guide is to instruct both the novice and experienced user, as well as -&clm; developers in the use of &clm4; for land-surface climate modeling. - - - -$URL$ - -&build_date; - - - - - -Dedicated to the Land Model Working Group, winners of the 2008 &ccsm; Distinguished Achievement Award. -May you continue to collaborate together well, and continue to drive -the science of land surface modeling forward with your diligent and persistent efforts. - - - - -&preface; -&custom; -&tools; -&adding_files; -&special_cases; -&single_point; -&ptclmdoc; -&trouble; -&testing; -&appendix; - - diff --git a/doc/UsersGuide/co2_streams.txt b/doc/UsersGuide/co2_streams.txt deleted file mode 100644 index 4d38c6d06e..0000000000 --- a/doc/UsersGuide/co2_streams.txt +++ /dev/null @@ -1,50 +0,0 @@ - - - This is a streams file to pass historical CO2 from datm8 to the other - surface models. It reads in a historical dataset derived from data used - by CAM. The getco2_historical.ncl script in components/clm/tools/ncl_scripts - was used to convert the CAM file to a streams compatible format (adding domain - information and making CO2 have latitude/longitude even if only for a single - point. - - - - Input stream description file for historical CO2 reconstruction data - - 04 March 2010: Converted to form that can be used by datm8 by Erik Kluzek - 18 December 2009: Prepared by B. Eaton using data provided by - Jean-Francois Lamarque. All variables except f11 are directly from - PRE2005_MIDYR_CONC.DAT. Data from 1765 to 2007 with 2006/2007 just - a repeat of 2005. 
- - - CLMNCEP - - - - time time - lonc lon - latc lat - area area - mask mask - - - /fis/cgd/cseg/csm/inputdata/atm/datm7/CO2 - - - fco2_datm_1765-2007_c100614.nc - - - - - CO2 co2diag - - - /fis/cgd/cseg/csm/inputdata/atm/datm7/CO2 - - - fco2_datm_1765-2007_c100614.nc - - - - diff --git a/doc/UsersGuide/custom.xml b/doc/UsersGuide/custom.xml deleted file mode 100644 index d6a60a28f7..0000000000 --- a/doc/UsersGuide/custom.xml +++ /dev/null @@ -1,1615 +0,0 @@ - - - - - - - component set - compset - "I" compsets - "B" compsets - "E" compsets - "F" compsets - &CLMCONFIG; - &CLMNAMELIST; - &CLMFORCECOLD; - &CLMUSECASE; - &CLM1PT; - &CLMUSRDAT; - &CLMCO2; - - -How to customize the configuration for a case with &clm; - - -The -&cesm; User's Guide gives you the details on how to setup, &configure;, build, and run -a case. That is the document to give you the details on using the &cesm; scripts. The purpose -of this document is to give you the details when using &cesm; with &clm; on how to customize -and use advanced features in &clm;. You should be familiar with the &cesm; User's Guide and -how to setup cases with &cesmrel; before referring to this document. - - -In this chapter we deal with three different ways of customizing a case: Choosing a compset, -Customizing Configuration options, and customizing the &clm; Namelist. There are many different -compsets that use &clm; and many are setup to enable special features of &clm; from the start. So -the first thing you want to be familiar with are the different options in the compsets. The -next section shows the different options for customizing the configuration options for -&clm;. -Here we introduce the &clm; &configure; and &buildnml; scripts and how using the options in -&envconf; you can customize the configuration and the initial -namelist. The final section tells you about the &clm; namelist and how you can customize the -namelist once you have run "&configure; -case" and have an initial namelist in -BuildConf/clm.buildnml.csh. You can also -use &envconf; options to change your namelist as well, before "&configure; -case" is -run. - - - -Choosing a compset using &clm; - - -When setting up a new case one of the first choices to make is which "component -set" (or compset) to use. The -compset refers to which component models are used as well as specific settings for them. We label the different -types of compsets with a different letter of the alphabet from "A" (for all data model) to "X" (for all dead model). -The compsets of interest when working with &clm; are the "I" compsets (which contain -&clm; with a data atmosphere model -and a stub ocean, and stub sea-ice models), "E" and "F" compsets (which contain &clm; -with the active atmosphere model (CAM), -prescribed sea-ice model, and a data ocean model), and "B" compsets which have all active components. Below we -go into details on the "I" compsets which emphasize &clm; as the only active model, and just mention the two other categories. - - -When working with &clm; you usually want to start with a relevant "I" compset before moving to the more -complex cases that involve other active model components. The "I" compsets can exercise -&clm; in a way that -is similar to the coupled modes, but with much lower computational cost and faster turnaround times. - - -Compsets coupled to data atmosphere and stub ocean/sea-ice ("I" compsets) -&compsets_list; - - - - Compsets coupled to active atmosphere with data ocean - - CAM compsets are compsets that start with "E" or "F" in the name. 
They are - described more fully in the scripts documentation or the CAM documentation. "E" compsets have -a slab ocean model while "F" compsets have a data ocean model. - - - - -Fully coupled compsets with fully active ocean, sea-ice, and atmosphere - - Fully coupled compsets are compsets that start with "B" in the name. They are - described more fully in the scripts documentation. - - - - -Conclusion to choosing a compset - -We've introduced the basic type of compsets that use &clm; and given some further details -for the "standalone &clm;" (or "I" compsets). The - -config_compsets.xml lists all of the compsets and gives a full description -of each of them. In the next section we look into customizing the &configure; time options -for compsets using &clm;. - - - - - - - -Customizing the &clm; configuration - -The "Creating a Case" section of the -&cesm1; Scripts -User's-Guide -gives instructions on creating a case. What is of interest here is how to customize your -use of &clm; -for the case that you created. In this section we discuss how to customize your case before the first -step -- the "&configure; -case" step is done. In the next section we will discuss how to customize your -&clm; namelist after "&configure; -case" has already been done. - - -For &clm; when "&configure; -case" is called there are two steps that take place: - - -The &clm; "&configure;" script is called to setup the build-time -configuration for &clm; (more information on &configure; is given in -). -The &clm; "&buildnml;" script is called to generate the initial -run-time namelist for &clm; (more information on &buildnml; is given below in -. - - -When customizing your case at the &configure; step you are able to modify the process by effecting either one -or both of these steps. The &clm; "&configure;" and "&buildnml;" scripts are both available in the "models/lnd/clm/bld" -directory in the distribution. Both of these scripts have a "-help" option that is useful to examine to see what -types of options you can give either of them. - - -There are five different types of customization for the configuration that we will -discuss: &cesm1; &clm; configuration items, Configure time User Namelist, -other noteworthy &cesm; configuration items, the &clm; &configure; script options, and -the &clm; &buildnml; script options. - - -Information on all of the script, configuration, build and run items is found under -scripts/ccsm_utils/Case.template -in the -config_definition.xml - file. - - - -&clm; Script configuration items - -Below we list each of the &cesm; configuration items that are specific to &clm;. All -of these are available in your: &envconf; file. - - - - &CLMCONFIG; - &CLMBLDNML; - &CLMNAMELIST; - &CLMFORCECOLD; - &CLMUSECASE; - &CLM1PT; - &CLMUSRDAT; - &CLMCO2; - -For the precedence of the different options to &buildnml; see the section on -precedence below. - - -The first item &CLMCONFIG; has to do with customizing the &clm; configuration options for your case, the rest -all have to do with generating the initial namelist. - - - -&CLMCONFIG; - - -The option &CLMCONFIG; is all about passing command line arguments to the &clm; &configure; script. It is important -to note that some compsets, may already put a value into the &CLMCONFIG; variable. You can still add more -options to your &CLMCONFIG; but make sure you add to what is already there rather than replacing it. Hence, -we recommend using the "-append" option to the xmlchange script. 
In - -below we will go into more details on options that can be customized in the &clm; "&configure;" script. It's -also important to note that the &clm; template may already invoke certain &clm; &configure; options and as such those -command line options are NOT going to be available to change at this step (nor would you want to change them). -The options to &configure; are given with the "-help" option which is given in -. - - - - - -&CLMUSECASE; - - -&CLMUSECASE; is used to set a particular set of conditions that set multiple namelist items, all centering around -a particular usage of the model. -To list the valid options do the following: - - -> cd models/lnd/clm/doc -> ../bld/&buildnml; -use_case list - - -The output of the above command is: - - -&usecases_list; - - - -See the section for the precedence of this -option relative to the others. - - - - - - -&CLMBLDNML; - - -The option &CLMBLDNML; is for passing options to the &clm; "&buildnml;" script. As with the "&configure;" -script the &clm; template may already invoke certain options and as such those options will NOT be available to be -set here. The best way to see what options can be sent to the "&buildnml;" script is to do - - -> cd models/lnd/clm/bld -> ./&buildnml; -help - - -Here is the output from the above. - - -./&buildnml_help; - - -The &clm; template already sets the resolution and mask as well as the &configure; file, -the start-type, the co2_ppmv, rtm_tstep, and rtm_res, and defines an input -namelist and namelist input file, and it normally sets either "-ignore_ic_year" or -"-ignore_ic_date". Also many -of the options are designed solely for &clm; stand-alone testing and hence should NOT -be used (any of the options starting -with a "datm_" or "drv_" prefix. Hence there are then only five different options that could be set: - - - --lnd_res --sim_year --rcp --clm_demand --verbose - - - -"-lnd_res" is used to run &clm; in fine-mesh mode at a higher resolution than the atmospheric model. This can -be useful to get higher resolution from the land model, but saving computer time -by running the more expensive atmospheric model at a lower resolution. -To get a list of valid resolutions to run at do the following: - - -> cd models/lnd/clm/doc -> ../bld/&buildnml; -lnd_res list - - - -The fine-mesh mode is considered experimental, and you may run into problems when you use -it. Another option is to use the CESM level "tri-grid" capability to run the land model -on a different grid than the atmospheric model. Read the CESM User's-Guide to learn how -to do this. - - - - - -See the section for the precedence of this -option relative to the others. - - - - -"-clm_demand" asks the &buildnml; step to require that the list of variables -entered be set. Typically, this is used to require that optional filenames be used and ensure -they are set before continuing. For example, you may want to require that -flanduse_timeseries be set to get dynamically changing vegetation types. To do this -you would do the following. - -> ./xmlchange -file env_conf.xml -id &CLMBLDNML; -val "-clm_demand flanduse_timeseries" - -To see a list of valid variables that you could set do this: - -> cd models/lnd/clm/doc -> ../bld/&buildnml; -clm_demand list - - - - -Using a 20th-Century transient compset or the 20thC_transient use-case -using &CLMUSECASE; would set this as well, but would also use -dynamic nitrogen and aerosol deposition files, so using -clm_demand would be a way -to get just dynamic vegetation types and NOT the other files as well. 
- - - -"-sim_year" is used to set the simulation year you want the data-sets to simulate conditions for in the input -datasets. The simulation "year" can also be a range of years in order to do simulations -with changes in the dataset values as the simulation progresses. To list the valid -options do the following: - - -> cd models/lnd/clm/doc -> ../bld/&buildnml; -sim_year list - - -"-rcp" is used to set the representative concentration pathway for the future scenarios -you want the data-sets to simulate conditions for, in the input -datasets. To list the valid options do the following: - - -> cd models/lnd/clm/doc -> ../bld/&buildnml; -rcp list - - - - - -&CLMNAMELIST; - - -The option &CLMNAMELIST; is for passing namelist items into the "clm_inparm" namelist. -Any items that are set in &CLMNAMELIST; will be set in your namelist after "&configure; --case" is done. - - - -For character namelist items you need to use "&apos;" as quotes for strings so that the -scripts don't get confused with other quotes they use. - - - -Example, you want to set hist_dov2xy to .false. -so that you get vector output to your history files. To do so edit -&envconf; and add a setting for hist_dov2xy. -So do the following: - -> ./xmlchange -file env_conf.xml -id &CLMNAMELIST; -val hist_dov2xy=.false. - - - -Example, you want to set hist_fincl1 to add the variable 'HK' -to your history files. To do so edit -&envconf; and add a setting for hist_fincl1. -So do the following: - -> ./xmlchange -file env_conf.xml -id &CLMNAMELIST; -val "hist_fincl1=&apos;HK&apos;" - -For a list of the history fields available see -&clm; History Fields. - - - -See the section for the precedence of this -option relative to the others. - - - - - - -&CLMCO2; - - -&CLMCO2; sets the type of input &CO2; for either "constant", "diagnostic" or prognostic". -If "constant" the value from CCSM_CO2_PPMV will be used. If "diagnostic" -or "prognostic" the values MUST be sent from the atmosphere model. For more information on how -to send &CO2; from the data atmosphere model see . - - - - - -&CLMFORCECOLD; - - -&CLMFORCECOLD; when set to on, requires that -your simulation do a cold start from arbitrary initial conditions. If this is NOT set, it -will use an initial condition file if it can find an appropriate one, and otherwise do a cold -start. &CLMFORCECOLD; is a good way to ensure that you are doing a cold -start if that is what you want to do. - - - - - -&CLM1PT; - - -&CLM1PT; is used ONLY for a pt1_pt1 -resolution simulation to set the name of the single-point files to use. -To see a list of the valid resolutions do this: - -> cd models/lnd/clm/doc -> ../bld/&buildnml; -res list - - - -The output of the above command is: - - -&res_list; - - -the valid resolutions that can be used with &CLM1PT; are the ones that -have city or nation names such as: 5x5_amazon, 1x1_vancouverCAN 1x1_mexicocityMEX, or -1x1_brazil. The "1x1_" prefix means the file is for a single-point, while "5x5_" prefix means -it's for a region of five points in latitude by five points in longitude. Both regional -and single point datasets can be used for &CLM1PT;. If you create your own datasets -you can also use &CLM1PT; along with &CLMUSRDAT; (documented below), setting &CLM1PT; to -the value in &CLMUSRDAT; so that your datasets are used rather than the standard ones.o - - - - - -&CLMUSRDAT; - - -&CLMUSRDAT; provides a way to enter your own datasets into the initial -namelist setup at "&configure; -case". 
The files you create must be named with -specific naming conventions outlined in: . -To see what the expected names of the files are, use the -queryDefaultNamelist.pl to see -what the names will need to be. For example if your &CLMUSRDAT; will -be "1x1_boulderCO", with a "navy" land-mask, constant simulation year range, for 1850, -the following will list what your filenames should be: - -> cd models/lnd/clm/bld -> queryDefaultNamelist.pl -usrname "1x1_boulderCO" -options \ -mask=navy,sim_year=1850,sim_year_range="constant" -csmdata $CSMDATA - -An example of using &CLMUSRDAT; for a simulation is given in -. - - - -See the section for the precedence of this -option relative to the others. - - - - - - - - - -Configure time User Namelist - -&CLMNAMELIST; as described above allows you to set any -extra namelist items you would like to appear in your namelist after first &configure;d. -However, it only allows you a single line to enter namelist items, and strings must -be quoted with &apos; which is a bit awkward. If you have a long list of namelist -items you want to set (such as a long list of history fields) a convenient way to do it -is to create a &usernlclm; that contains just the list of namelist -variables you want to add to your initial namelist. The &usernlclm; -will only be used when &configure; is run, so if you change it after &configure; -- it won't -change anything. The file needs to be in valid FORTRAN namelist format, and the &configure; -step will abort if there are syntax errors. It merely needs to be named correctly -&usernlclm; and placed in your case directory (where your other -env_*.xml files are). The namelist name actually doesn't have to be -valid, but all the variable names must be. Here's an example &usernlclm; -namelist that sets a bunch of history file related items, to create output history files -monthly, daily, every six and 1 hours. - -Example &usernlclm; namelist file - -&clmexp - hist_fincl2 = 'TG','TBOT','FIRE','FIRA','FLDS','FSDS', - 'FSR','FSA','FGEV','FSH','FGR','TSOI', - 'ERRSOI','BUILDHEAT','SABV','SABG', - 'FSDSVD','FSDSND','FSDSVI','FSDSNI', - 'FSRVD','FSRND','FSRVI','FSRNI', - 'TSA','FCTR','FCEV','QBOT','RH2M','H2OSOI', - 'H2OSNO','SOILLIQ','SOILICE', - 'TSA_U', 'TSA_R', - 'TREFMNAV_U', 'TREFMNAV_R', - 'TREFMXAV_U', 'TREFMXAV_R', - 'TG_U', 'TG_R', - 'RH2M_U', 'RH2M_R', - 'QRUNOFF_U', 'QRUNOFF_R', - 'SoilAlpha_U', - 'Qanth', 'SWup', 'LWup', 'URBAN_AC', 'URBAN_HEAT' - hist_fincl3 = 'TG:I', 'FSA:I', 'SWup:I', 'URBAN_AC:I', 'URBAN_HEAT:I', - 'TG_U:I', 'TG_R:I', - hist_fincl4 = 'TG', 'FSA', 'SWup', 'URBAN_AC', 'URBAN_HEAT' - hist_mfilt = 1, 30, 28, 24 - hist_nhtfrq = 0, -24, -6, -1 -/ - - - - -See the section for the precedence of this -option relative to the others. - - - - -In the above example we use an invalid namelist name &clmexp -- but it works anyway -because the &clm; &buildnml; knows the namelist that specific variable names belong to, and -it puts them there. - - -Obviously, all of this would be difficult to put in the &CLMNAMELIST; -variable, especially having to put &apos; around all the character strings. For -more information on the namelist variables being set here and what they mean, see -the section on &clm; namelists below, as well as the namelist definition that gives -details on each variable. - - - - -Precedence of Options - -Note: The precedence for setting the values of namelist variables with the -different env_conf options is (highest to lowest): - -Namelist values set by specific command-line options, like, -d, -sim_year -(i.e. 
Precedence of Options

Note: The precedence for setting the values of namelist variables with the different env_conf options is (highest to lowest):

1. Namelist values set by specific command-line options, like -d or -sim_year (i.e. the &CLMBLDNML; env_conf variable)
2. Values set on the command-line using the -namelist option (i.e. the &CLMNAMELIST; env_conf variable)
3. Values read from the file specified by -infile (i.e. the &usernlclm; file)
4. Datasets from the -clm_usr_name option (i.e. the &CLMUSRDAT; env_conf variable)
5. Values set from a use-case scenario, e.g., -use_case (i.e. the &CLMUSECASE; env_conf variable)
6. Values from the namelist defaults file.

Thus a setting in &CLMBLDNML; will override a setting for the same thing given in a use case with &CLMUSECASE;. Likewise, a setting in &CLMNAMELIST; will override a setting in &usernlclm;.

Setting Your Initial Conditions File

Starting from spun-up initial conditions is very important, especially with &clmcn;. Even with &clmsp; it takes many simulation years to get the model fully spun up. There are a couple of different ways to provide an initial condition file.

Your initial condition file MUST agree with the surface dataset you are using to run the simulation. If the two files do NOT agree you will get a run-time error about a mismatch in PFT weights, or in the number of PFTs or columns. To get around this you'll need to use the interpinic tool to interpolate your initial condition dataset.

Doing a hybrid simulation to provide initial conditions

The first option is to set up a hybrid simulation and give a RUN_REFCASE and RUN_REFDATE to specify the reference case simulation name to use. When you set up most cases at the standard resolutions of "f09" or "f19", this will already be done for you. For example, if you run an "I2000CN" compset at "f09_g16" resolution, the following settings will already be done for you:

./xmlchange -file env_conf.xml -id RUN_TYPE -val hybrid
./xmlchange -file env_conf.xml -id RUN_REFCASE -val I2000CN_f09_g16_c100503
./xmlchange -file env_conf.xml -id RUN_REFDATE -val 0001-01-01
./xmlchange -file env_conf.xml -id GET_REFCASE -val TRUE

Setting the GET_REFCASE option to TRUE means it will copy the files from the $DIN_LOC_ROOT/ccsm4_init/I2000CN_f09_g16_c100503/0001-01-01 directory. Note that the RUN_REFCASE and RUN_REFDATE variables are expanded to get the directory name above. If you do NOT set GET_REFCASE to TRUE then you will need to have placed the file in your run directory yourself. In either case, the file is expected to be named $RUN_REFCASE.clm2.r.$RUN_REFDATE-00000.nc (with the variables expanded, of course).

Doing a branch simulation to provide initial conditions

The setup for running a branch simulation is essentially the same as for a hybrid, with the exception of setting RUN_TYPE to branch rather than hybrid. A branch simulation essentially restarts the reference case from where it left off, in order to exactly reproduce it, while a hybrid simulation allows you to change namelist items and to use a different code base that may have fewer fields on its restart file than a full restart requires. GET_REFCASE works similarly for a branch case as for a hybrid.
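As an illustrative sketch of a branch setup (the RUN_REFCASE and RUN_REFDATE values here are hypothetical -- substitute the casename and restart date of your own earlier simulation):

./xmlchange -file env_conf.xml -id RUN_TYPE -val branch
./xmlchange -file env_conf.xml -id RUN_REFCASE -val myI1850CN_f09_g16_case
./xmlchange -file env_conf.xml -id RUN_REFDATE -val 0021-01-01
./xmlchange -file env_conf.xml -id GET_REFCASE -val FALSE

With GET_REFCASE set to FALSE, you are expected to place the reference case's restart files in your run directory yourself, as described for the hybrid case above.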
Providing a finidat file in your &usernlclm; file

Setting up a branch or hybrid simulation requires the initial condition file to follow a standard naming convention, and a standard input directory if you use the GET_REFCASE option. If you want to name your file willy-nilly and place it anywhere, you can set it in your &usernlclm; file as in this example:

&clm_inparm
 finidat = '/glade/home/$USER/myinitdata/clmi_I1850CN_f09_g16_0182-01-01.c120329.nc'
/

Note that if you provide an initial condition file you can NOT set &CLMFORCECOLD; to TRUE.

Adding a finidat file to the XML database

Like other datasets, if you want a given initial condition file to be used for all (or most) of your cases, you'll want to put it in the XML database so it will be used by default. The initial condition files are resolution dependent, and also depend on the number of PFTs and other variables such as GLC_NEC, or whether irrigation is on or off. See the chapter on adding files to the XML database for more information on this.

Other noteworthy configuration items

For running "I" cases there are several other noteworthy configuration items that you may want to work with. Most of these involve settings for the &datm;, but one, CCSM_CO2_PPMV, applies to all models. If you are running a B, E, or F case that doesn't use the &datm;, the DATM_* settings obviously will not be used. All of the settings below are in your &envconf; file:

 CCSM_CO2_PPMV
 CCSM_VOC
 DATM_MODE
 DATM_PRESAERO
 DATM_CLMNCEP_YR_ALIGN
 DATM_CLMNCEP_YR_START
 DATM_CLMNCEP_YR_END
 DATM_CPL_CASE
 DATM_CPL_YR_ALIGN
 DATM_CPL_YR_START
 DATM_CPL_YR_END

CCSM_CO2_PPMV

CCSM_CO2_PPMV sets the mixing ratio of &CO2; in parts per million by volume for ALL &cesm; components to use. Note that most compsets already set this value to something reasonable. Also note that some compsets may tell the atmosphere model to override this value with either historic or ramped values. If the CCSM_BGC variable is set to something other than "none", the atmosphere model will determine &CO2;, and &clm; will listen and use what the atmosphere sends it. On the &clm; side, the namelist item co2_type tells &clm; to use the value sent from the atmosphere rather than a value set on its own namelist.

CCSM_VOC

CCSM_VOC enables passing of Volatile Organic Compounds (VOC) from &clm; to the atmosphere model. This, of course, is only important if the atmosphere model is a fully active model that can use these fields in its chemistry calculations.

DATM_MODE

DATM_MODE sets the mode that the &datm; model should run in. This determines how data is handled, as well as what the source of the data will be. Many of the modes are set up specifically for ocean and/or sea-ice modeling. The modes that are designed for use by &clm; are:

 &CLMQIAN;
 CLM1PT
 &CPLHIST;

&CLMQIAN; is the standard mode, using the global atmospheric forcing data developed by Qian et. al. for &clm; from NCEP data for 1948 to 2004. See the &CLMQIAN; section below for more information on the &datm; settings for this mode.

CLM1PT is for the special cases where we have single-point tower data for particular sites. Right now we only have data for three urban locations: Mexico City Mexico, Vancouver Canada, and the urban-c alpha site. See the CLM1PT section below for more information on the &datm; settings for this mode.

&CPLHIST; is for running with atmospheric forcing from a previous &cesm; simulation. See the &CPLHIST; section below for more information on the &datm; settings for this mode.

There is a problem with running simulations in CLM1PT mode for longer than one data cycle: the atmospheric forcing will be held constant, so all atmosphere forcing fields stay fixed at the last value and the results will be useless. See bug 1377 in the &KnownBugs; file on how to fix this problem.
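Before describing the remaining DATM_* variables individually, here is a sketch of setting them with xmlchange. The values are purely illustrative, and the DATM_MODE value shown (CLM_QIAN, which is how &CLMQIAN; is spelled in the scripts) should be checked against the valid values for your &cesm; version:

./xmlchange -file env_conf.xml -id DATM_MODE -val CLM_QIAN
./xmlchange -file env_conf.xml -id DATM_PRESAERO -val clim_1850
./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_START -val 1948
./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_END -val 1972
./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_ALIGN -val 1

This would cycle the 1948 to 1972 portion of the Qian forcing with constant year-1850 aerosols, aligning the start of the forcing range with model year 1.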
DATM_PRESAERO

DATM_PRESAERO sets the prescribed aerosol mode for the data atmosphere model. The list of valid options includes:

 clim_1850       = constant year 1850 conditions
 clim_2000       = constant year 2000 conditions
 trans_1850-2000 = transient 1850 to year 2000 conditions
 rcp2.6          = transient conditions for the RCP 2.6 W/m2 future scenario
 rcp4.5          = transient conditions for the RCP 4.5 W/m2 future scenario
 rcp6.0          = transient conditions for the RCP 6.0 W/m2 future scenario
 rcp8.5          = transient conditions for the RCP 8.5 W/m2 future scenario
 pt1_pt1         = read in single-point or regional datasets

DATM_CLMNCEP_YR_START

DATM_CLMNCEP_YR_START sets the beginning year to cycle the atmospheric data over for &CLMQIAN; mode.

DATM_CLMNCEP_YR_END

DATM_CLMNCEP_YR_END sets the ending year to cycle the atmospheric data over for &CLMQIAN; mode.

DATM_CLMNCEP_YR_ALIGN

DATM_CLMNCEP_YR_START and DATM_CLMNCEP_YR_END determine the range of years to cycle the atmospheric data over, and DATM_CLMNCEP_YR_ALIGN determines which year in that range the simulation will start with.

DATM_CPL_CASE

DATM_CPL_CASE sets the casename to use for &CPLHIST; mode.

DATM_CPL_YR_START

DATM_CPL_YR_START sets the beginning year to cycle the atmospheric data over for &CPLHIST; mode.

DATM_CPL_YR_END

DATM_CPL_YR_END sets the ending year to cycle the atmospheric data over for &CPLHIST; mode.

DATM_CPL_YR_ALIGN

DATM_CPL_YR_START and DATM_CPL_YR_END determine the range of years to cycle the atmospheric data over, and DATM_CPL_YR_ALIGN determines which year in that range the simulation will start with.

Downloading DATM Forcing Data

In Chapter One of the &cesm; User's Guide there is a section on "Downloading input data". The normal process of setting up cases will use the scripts/ccsm_utils/Tools/check_input_data script to retrieve data from the &cesm; subversion inputdata repository. However, the DATM forcing data is unique because it is large compared to the rest of the input data (56 Gbytes). Most of the data is stored in the directory set by the &envrun; variable DIN_LOC_ROOT_CSMDATA. The &CLMQIAN; forcing data is in a (possibly) separate directory given by the &envrun; variable DIN_LOC_ROOT_CLMQIAN. In most cases this will be the directory atm/datm7/atm_forcing.datm7.Qian.T62.c080727 under DIN_LOC_ROOT_CSMDATA. On bluefire there is a separate path for the &CLMQIAN; forcing data. We have the full set of data available on a few of the machines we use: bluefire, jaguarpf, and edinburgh. As of October 18th, 2011 we've uploaded the entire set of forcing data into the input data repository, so it can now be treated like other input datasets and the check_input_data script can retrieve it for you (previously only two years of data were available). You can also download the data from the Earth System Grid for other machines; see the Model Forcing Data link under the &clm; Documentation Page.
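As a sketch of how you might check for and retrieve missing forcing data with this script (the option names here are assumptions that may differ between &cesm; versions -- run the script with -help to confirm them):

> cd scripts/ccsm_utils/Tools
> ./check_input_data -inputdata $DIN_LOC_ROOT -check
> ./check_input_data -inputdata $DIN_LOC_ROOT -export

Here -check reports which required files are missing locally, and -export downloads them from the inputdata repository.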
Customizing via the template files

The final thing that the user may wish to do before &configure; is run is to edit the template files, which determine the configuration and initial namelist. The variables in &envconf; typically mean you will NOT have to edit the templates, but there are rare instances where it is useful to do so; the appendix on template files gives the details. The template files are copied to your case directory and are available under Tools/Templates. The template files you might wish to edit are:

 clm.cpl7.template
 datm.cpl7.template
 cpl.template

More information on the &clm; &configure; script

The &configure; script defines the details of a &clm; configuration and summarizes it into a config_cache.xml file, which will be placed in your case directory under Buildconf/clmconf. The config_definition.xml file in models/lnd/clm/bld/config_files gives a definition of each &clm; configuration item, and it is viewable in a web browser. Many of these items are things that you would NOT change, but looking through the list gives you the valid options and a good description of each. Below we repeat the contents of the config_definition.xml file:

Help on &clm; &configure;

Coupling this with looking at the options to &configure; with "-help", as below, will enable you to understand how to set the different options.

> cd models/lnd/clm/bld
> &configure; -help

The output of the above command is as follows:

&config_help;

We've given details on how to use the options in &envconf; to interact with the &clm; "&configure;" and "&buildnml;" scripts, as well as a good understanding of how these scripts work and the options to them. In the next section we give further details on the &clm; namelist. You can customize the namelist for these options after "&configure; -case" is run.

Customizing the &clm; namelist

Once a case is configured, we can customize it further by editing the run-time namelist for &clm;. First we'll list the definition of each namelist item and their valid values, and then the default values for them. Next, for some of the most used or tricky namelist items, we'll give examples of their use, with example namelists that highlight these features.

Definition of Namelist items and their default values

Here we point you to where you can find the definition of each namelist item and, separately, the default values for them. The default values may change depending on the resolution, land-mask, simulation-year and other attributes. Both of these files are viewable in your web browser. Below we provide links to them, and then expand each in turn:

 Definition of each Namelist Item
 Default values of each &clm; Namelist Item

One set of the namelist items allows you to add fields to the output history files: hist_fincl1, hist_fincl2, hist_fincl3, hist_fincl4, hist_fincl5, and hist_fincl6. The &clm; History Fields link documents all of the history fields available and gives the long-name and units for each.

&hisfldtbl;

Examples of using different namelist features

Below we give examples of user namelists that activate different commonly used namelist features. We discuss the namelist features in different examples and then show a user namelist that includes an example of the use of these features. First we show the default namelist, which doesn't activate any user options.

The default namelist

Here we give the default namelist as it would be created for an I1850CN compset at 0.9x1.25 resolution with a gx1v6 land-mask. To edit the namelist you would edit Buildconf/clm.buildnml.csh under your case (or, before &configure;, include a user namelist with just the items you want to change).
For simplicity we will just show the namelist and NOT the entire file. In the sections below, also for simplicity, we will just show the user namelist (&usernlclm;) that adds (or modifies existing) namelist items. Again, just adding the &usernlclm; file to your case directory before "&configure; -case" is invoked will cause the given namelist items to appear in your &clm; namelist.

Default &clm; Namelist

&clm_inparm
 co2_ppmv = 284.7
 co2_type = 'constant'
 create_crop_landunit = .false.
 dtime = 1800
 fatmgrid = '$DIN_LOC_ROOT/lnd/clm2/griddata/griddata_0.9x1.25_070212.nc'
 fatmlndfrc = '$DIN_LOC_ROOT/lnd/clm2/griddata/fracdata_0.9x1.25_gx1v6_c090317.nc'
 finidat = 'I1850CN_f09_g16_c100503.clm2.r.0001-01-01-00000.nc'
 fpftcon = '$DIN_LOC_ROOT/lnd/clm2/pftdata/pft-physiology.c110425.nc'
 frivinp_rtm = '$DIN_LOC_ROOT/lnd/clm2/rtmdata/rdirc_0.5x0.5_simyr2000_slpmxvl_c120717.nc'
 fsnowaging = '$DIN_LOC_ROOT/lnd/clm2/snicardata/snicar_drdt_bst_fit_60_c070416.nc'
 fsnowoptics = '$DIN_LOC_ROOT/lnd/clm2/snicardata/snicar_optics_5bnd_c090915.nc'
 fsurdat = '$DIN_LOC_ROOT/lnd/clm2/surfdata/surfdata_0.9x1.25_simyr1850_c091006.nc'
 ice_runoff = .true.
 outnc_large_files = .true.
 rtm_nsteps = 6
 urban_hac = 'ON_WASTEHEAT'
 urban_traffic = .false.
/
&ndepdyn_nml
 stream_fldfilename_ndep = '$DIN_LOC_ROOT/lnd/clm2/ndepdata/fndep_clm_hist_simyr1849-2006_1.9x2.5_c100428.nc'
 stream_year_first_ndep = 1850
 stream_year_last_ndep = 1850
/

Note that the namelist introduces some of the history namelist options that will be talked about in further detail below (hist_mfilt and hist_nhtfrq).

Adding/removing fields on your primary history file

The primary history files are output monthly and contain an extensive list of fields. Fields can be added to the list using hist_fincl1, or removed from it by adding fieldnames to hist_fexcl1. A sample user namelist &usernlclm; adding a few new fields (cosine of solar zenith angle, and solar declination) and excluding a few standard fields (ground temperature, vegetation temperature, soil temperature and soil water) is:

Example &usernlclm; namelist adding and removing fields on primary history file

&clm_inparm
 hist_fincl1 = 'COSZEN', 'DECL'
 hist_fexcl1 = 'TG', 'TV', 'TSOI', 'H2OSOI'
/

Adding auxiliary history files and changing output frequency

The hist_fincl2 through hist_fincl6 namelist variables add the given history fields to auxiliary history file "streams", and the hist_fexcl2 through hist_fexcl6 namelist variables remove the given history fields from those auxiliary "streams". A history "stream" is a set of history files that are produced at a given frequency. By default there is only one stream, of monthly data files. To add more streams you add history fieldnames to hist_fincl2 through hist_fincl6. The output frequency and the way averaging is done can be different for each history file stream. By default the primary history files are monthly and any others are daily. You can have up to six active history streams, but you need to activate them in order: if you activate stream "6" by setting hist_fincl6, but any of hist_fincl2 through hist_fincl5 are unset, only the history streams up to the first blank one will be activated.

The frequency of the history file streams is given by the namelist variable hist_nhtfrq, which is an array of rank six, one entry per history stream.
The values of the hist_nhtfrq array must be integers, with the following meanings:

 A positive value means the output frequency is the number of model steps between output.
 A negative value means the output frequency is the absolute value in hours (i.e. -1 means hourly and -24 means daily). Daily (-24) is the default value for all auxiliary files.
 Zero means the output frequency is monthly. This is the default for the primary history files.

The number of samples on each history file stream is given by the namelist variable hist_mfilt, which is also an array of rank six, one entry per history stream. The values of the hist_mfilt array must be positive integers. By default the primary history file stream has one time sample on it (i.e. output goes to separate monthly files), and all other streams have thirty time samples on them.

A sample user namelist &usernlclm; turning on four extra file streams (daily, six-hourly, hourly, and every time-step), leaving the primary history files as monthly, and setting the number of samples per file so that files are created yearly (12 monthly samples), monthly (30 daily samples), weekly (28 six-hourly samples), daily (24 hourly samples), and daily (48 time-step samples, given the 1800-second time step) is:

Example &usernlclm; namelist adding auxiliary history files and changing output frequency

&clm_inparm
 hist_fincl2 = 'TG', 'TV'
 hist_fincl3 = 'TG', 'TV'
 hist_fincl4 = 'TG', 'TV'
 hist_fincl5 = 'TG', 'TV'
 hist_nhtfrq = 0, -24, -6, -1, 1
 hist_mfilt  = 12, 30, 28, 24, 48
/

Removing all history fields

Sometimes, for various reasons, you want to remove all the history fields: either because you want to do testing without any output, or because you only want a very small custom list of output fields rather than the default extensive list. By default only the primary history files are active, so technically, using hist_fexcl1 as explained in the first example, you could list ALL of the output history fields in hist_fexcl1 and then you wouldn't get any output. However, as the list is very extensive this would be cumbersome. To facilitate this, hist_empty_htapes allows you to turn off all default output. You can still use hist_fincl1 to turn your own list of fields on, but you then start from a clean slate. A sample user namelist &usernlclm; turning off all history fields and then activating just a few selected fields (ground and vegetation temperatures and absorbed solar radiation) is:

Example &usernlclm; namelist removing all history fields

&clm_inparm
 hist_empty_htapes = .true.
 hist_fincl1 = 'TG', 'TV', 'FSA'
/

Note, you could also build with the "-noio" option to &CLMCONFIG;. But that would build the model without history output entirely, and you wouldn't be able to add any back in later.

Various ways to change history output averaging flags

There are two ways to change the averaging of output history fields. The first is hist_avgflag_pertape, which gives a default averaging flag for each history stream. The second is to add an averaging flag to the end of the field name, after a colon, when adding fields with hist_fincl* (for example 'TSOI:X' would output the maximum of TSOI). The types of averaging that can be done are:

 A  Average over the output interval.
 I  Instantaneous: output the value at the output interval.
 X  Maximum over the output interval.
 M  Minimum over the output interval.

The default averaging depends on the specific field, but for most fields it is an average.
A sample user namelist &usernlclm; making the monthly output fields all averages (except TSOI for the first two streams and FIRE for the fifth stream), and adding auxiliary file streams for instantaneous (six-hourly), maximum (daily), minimum (daily), and average (daily) output, is below. For some of the fields we diverge from the given per-tape flag and customize to a different averaging type.

Example &usernlclm; namelist with various ways to average history fields

&clm_inparm
 hist_empty_htapes = .true.
 hist_fincl1 = 'TSOI:X', 'TG', 'TV', 'FIRE', 'FSR', 'FSH',
               'EFLX_LH_TOT', 'WT'
 hist_fincl2 = 'TSOI:X', 'TG', 'TV', 'FIRE', 'FSR', 'FSH',
               'EFLX_LH_TOT', 'WT'
 hist_fincl3 = 'TSOI', 'TG:I', 'TV', 'FIRE', 'FSR', 'FSH',
               'EFLX_LH_TOT', 'WT'
 hist_fincl4 = 'TSOI', 'TG', 'TV:I', 'FIRE', 'FSR', 'FSH',
               'EFLX_LH_TOT', 'WT'
 hist_fincl5 = 'TSOI', 'TG', 'TV', 'FIRE:I', 'FSR', 'FSH',
               'EFLX_LH_TOT', 'WT'
 hist_avgflag_pertape = 'A', 'I', 'X', 'M', 'A'
 hist_nhtfrq = 0, -6, -24, -24, -24
/

In the example we put the same list of fields on each of the tapes: soil temperature, ground temperature, vegetation temperature, emitted longwave radiation, reflected solar radiation, sensible heat, total latent heat, and total water storage. We also modify the soil temperature on the primary history tape and the first auxiliary tape, outputting it as a maximum instead of the per-tape defaults of average and instantaneous respectively. On the third tape we output ground temperature instantaneously instead of as a maximum, on the fourth tape we output vegetation temperature instantaneously instead of as a minimum, and on the fifth tape we output FIRE instantaneously instead of as an average.

We also use hist_empty_htapes, as in the previous example, so we can list ONLY the fields that we want on the primary history tape.

Outputting history files as a vector in order to analyze the plant function types within gridcells

By default the output to history files is the grid-cell average of all land-units and vegetation types within a grid-cell, and output is on the full 2D latitude/longitude grid with ocean masked out. Sometimes it's important to understand how different land-units or vegetation types are acting within a grid-cell. The way to do this is to output history files as a 1D vector of all land-units and vegetation types. In order to display this, you'll need to do extensive post-processing to make sense of the output. Often you may only be interested in a few points, so once you figure out the 1D indices for the grid-cells of interest, you can easily view that data. 1D vector output can also be useful for single-point datasets, since it's then obvious that all data is for the same grid cell.

To do this you use hist_dov2xy, which is an array of rank six, one entry per history stream. Set an entry to .false. if you want that history stream to be a 1D vector. You can also use hist_type1d_pertape if you want to average over all of the plant functional types, columns, land-units, or grid-cells.
A sample user namelist &usernlclm; leaving the primary monthly files as 2D, and then doing grid-cell (GRID), column (COLS), and no averaging over auxiliary tapes output daily for a single field (ground temperature) is:

Example &usernlclm; namelist outputting some files in 1D vector format

&clm_inparm
 hist_fincl2 = 'TG'
 hist_fincl3 = 'TG'
 hist_fincl4 = 'TG'
 hist_fincl5 = 'TG'
 hist_fincl6 = 'TG'
 hist_dov2xy = .true., .false., .false., .false.
 hist_type1d_pertape = ' ', 'GRID', 'COLS', ' '
 hist_nhtfrq = 0, -24, -24, -24
/

LAND and COLS are also options for the per-tape averaging, but currently there is a bug with them and they fail to work.

Technically the default for hist_nhtfrq is monthly output for the primary files and daily for the other auxiliary tapes, so we don't actually have to include hist_nhtfrq here; we specify it for clarity.

Visualizing global 1D vector files will take effort. You'll probably want to do some post-processing, and possibly just extract single points of interest to see what is going on. Since the output is a 1D vector of land points only, traditional plots won't be helpful. The number of points per grid-cell will also vary for anything but grid-cell averaging. You'll need to use the output fields pfts1d_ixy and pfts1d_jxy to get the mapping of the fields to the global 2D array, and pfts1d_itype_veg gives you the PFT number for each PFT. Most likely you'll want to do this analysis in a data processing tool (such as NCL, Matlab, Mathematica, IDL, etcetera) that is able to read and process &netcdf; data files.

Outputting multi-layer snow history fields

A number of history fields provide information about individual snow layers: SNO_ABS, SNO_T, SNO_GS, SNO_Z, SNO_LIQH2O, SNO_ICE, SNO_TK, and SNO_BW; there is also an auxiliary field to aid interpretation: SNO_EXISTENCE (described below). These fields are inactive by default, but can be enabled like other history fields. If the maximum number of snow layers is 5 (for example), then the layers of these fields are arranged on the history file so that layer 1 is at the top of the snow pack, and layer 5 only exists if the snow is deep enough to support all layers.

Because snow layers can come into and out of existence, these fields can be challenging to interpret. It is easiest to analyze these fields if you output every time step and do not average to the grid cell (i.e., dov2xy = .false.). Otherwise, a few principles should be kept in mind when working with these fields:

 Temporal averages are taken only over times when a given snow layer exists.
 Grid cell averages are taken only over columns in which a given snow layer exists.
 SNO_EXISTENCE gives the fraction of the averaging period in which a given snow layer existed. For grid cell averages, this gives the weighted spatial fraction of the columns in which a snow layer existed for this averaging period. This is most useful for subsetting grid cells for analysis: for example, grid cells that have SNO_EXISTENCE = 1 for all snow layers can be analyzed most easily.

Here is a simple example illustrating this averaging, for a given snow layer, L. Assume a grid cell with 2 columns, with averaging done over 4 time steps. Column #1 has subgrid weight 0.2 and no snow in layer L in any time step.
Column #2 has subgrid weight 0.8, and has snow in layer L in time steps 3 and 4; the snow field of interest has values 1.0 and 2.0 in these two time steps, for this layer.

SNO_EXISTENCE is then 0.8*(2/4) = 0.4. The snow field's value would be 1.5 (note that times and columns with no snow in this layer are simply ignored).

Finally, note that the SNO_ABS field is not computed for urban columns, so it will have a missing value if snow exists only over urban columns for a given snow layer.

Conclusion to namelist examples

We've given various examples of namelists that use different namelist options to customize a case for particular purposes. Most of the examples revolve around how to customize the output history fields. This should give you a good basis for setting up your own &clm; namelist.

Customizing the &datm; Namelist and Streams files

When running "I" compsets with &clm;, you use the &datm; model to provide atmospheric forcing data to &clm;. There are four ways to customize &datm;:

 &datm; Main Namelist (datm_in)
 &datm; Stream Namelist (datm_atm_in)
 &datm; stream files
 &datm; template file (Tools/Templates/datm.cpl7.template)

The Data Model Documentation gives the details of all the options for the data models and for &datm; specifically. It goes into detail on all namelist items, both for &datm; and for &datm; streams, shows examples of stream files, and talks about their use. Editing the &clm; and &datm; template files is covered in the appendix on template files, so here we won't talk about the &datm; template file, we won't list ALL of the &datm; namelist options, and we won't go into great detail about stream files. But we will talk about a few of the options that are relevant for running with &clm;. All changes to the namelists or stream files are made by editing the Buildconf/datm.buildnml.csh file.

Because they aren't useful for work with &clm;, we will NOT discuss any of the options for the main &datm; namelist; use the Data Model Documentation at the link above for details of that. For the streams namelist we will discuss three items:

 mapalgo
 taxmode
 tintalgo

And for the streams file itself we will discuss:

 offset

Again, everything else (including the above items) is discussed in the Data Model User's Guide. Of the above, the last three -- offset, taxmode and tintalgo -- are closely related and have to do with the time interpolation of the &datm; data.

mapalgo

mapalgo sets the spatial interpolation method used to go from the &datm; input data to the output &datm; model grid. The default is bilinear. For CLM1PT we set it to nn, to just select the nearest neighbor. This saves time, and we also had problems running the interpolation for single-point mode.

taxmode

taxmode is the time axis mode. For &clm; we usually have it set to cycle, which means that once the end of the data is reached it will start over at the beginning. The extend mode re-uses the last time-step of the forcing data once the end of the forcing data is reached (and the first time-step before the point where the forcing data starts). See the warning below about the extend mode.

THE extend OPTION NEEDS TO BE USED WITH CAUTION! It is only invoked by default for the CLM1PT mode, and is only intended for the supported urban datasets, to extend the data for a single time-step. If you run the model extensively through periods beyond the data in this mode, you will effectively be repeating that last time-step over the entire period, which means the output of your simulation will be worthless. See bug 1377 in the &KnownBugs; file for more information on this issue.
offset (in the stream file)

offset is the time offset in seconds to apply to each stream of data. Normally it is NOT used, because the time-stamps for the data are set correctly for each stream. Note that the offset may NEED to be adjusted depending on the tintalgo setting described below, or to account for data that is time-stamped at the END of an interval rather than the middle or beginning. The offset is set in the stream file itself rather than on the stream namelist. For data with a tintalgo method of coszen the time-stamp needs to be for the beginning of the interval, while for other data it should be the midpoint. The offset can be used to adjust the time-stamps so that the data lines up correctly.

tintalgo

tintalgo is the time interpolation algorithm. For &clm; we usually use one of three modes: coszen, nearest, or linear. We use coszen for solar data, nearest for precipitation data, and linear for everything else. If your data is half-hourly or hourly, nearest will work fine for everything. The coszen scaling is useful for longer periods (three hours or more), to make the solar forcing match the cosine of the solar zenith angle over that longer period of time. If you use linear for longer intervals, the solar will cut out at night-time anyway, and the straight line will be a poor approximation of the cosine of the solar zenith angle of actual solar data. nearest likewise would be bad for longer periods, where it would be much higher than the actual values.

For coszen the time-stamps of the data should correspond to the beginning of the interval the data is measured for. Either make sure the time-stamps on the data files are set this way, or use the offset described above to adjust them.

For nearest and linear the time-stamps of the data should correspond to the middle of the interval the data is measured for. Either make sure the time-stamps on the data files are set this way, or use the offset described above to adjust them.

In the sections below we go over each of the relevant DATM_MODE options and what the above &datm; settings are for each. This gives you examples of actual usage for the settings. We also describe in what ways you might want to customize them for your own case.

&CLMQIAN; mode and its &datm; settings

In &CLMQIAN; mode the Qian dataset is used, which has 6-hourly solar and precipitation data and 3-hourly data for everything else. The dataset is divided into those three data streams: solar, precipitation, and everything else (temperature, pressure, humidity and wind). The time-stamps of the data were also adjusted so that they are at the beginning of the interval for solar, and at the middle for the other two. Because of this, the offset is set to zero, and tintalgo is coszen, nearest, and linear for the solar, precipitation and other data respectively. taxmode is set to cycle, and mapalgo is set to bilinear so that the data is spatially interpolated from the input T62 grid to the grid the atmosphere model is being run at.
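Schematically, the per-stream entries for the three &CLMQIAN; streams therefore look like the following. This is a sketch of the shr_strdata-style settings that appear via Buildconf/datm.buildnml.csh; the exact namelist group and layout may differ between &cesm; versions. The entries are in the order solar, precipitation, everything else:

 mapalgo  = 'bilinear', 'bilinear', 'bilinear'
 taxmode  = 'cycle',    'cycle',    'cycle'
 tintalgo = 'coszen',   'nearest',  'linear'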
Normally you wouldn't customize the &CLMQIAN; settings, but you might replicate its setup for your own global data with similar temporal characteristics.

CLM1PT mode and its &datm; settings

In CLM1PT mode the model is assumed to have half-hourly or hourly data for a single point. For the supported datasets that is exactly what it has, but if you add your own data you may need to make adjustments accordingly. Using the &CLMUSRDAT; option you can easily extend this mode for your own datasets, which may be regional or even global and could be at different temporal frequencies; if you do so, you'll need to adjust your &datm; settings. The dataset has all data in a single stream file. The time-stamps of the data were adjusted so that they are at the middle of the interval. Because of this, the offset is set to zero, tintalgo is set to nearest, taxmode is set to extend, and mapalgo is set to nn so that simply the nearest point is used.

If you are using your own data for this mode and it's not at least hourly, you'll want to adjust the &datm; settings for it. If the data is three or six hourly, you'll need to divide it up into separate streams as in &CLMQIAN; mode, which will require fairly extensive changes to the &datm; namelist and streams files. An example of doing this is given in the chapter on single-point simulations.

&CPLHIST; mode and its &datm; settings

In &CPLHIST; mode the model is assumed to have 3-hourly data on a global grid from a previous &cesm; simulation. As in &CLMQIAN; mode, the data is divided into three streams: one for precipitation, one for solar, and one for everything else. The time-stamps for &cesm; coupler history files are at the end of the interval, so the offset needs to be set in order to adjust the time-stamps to what the tintalgo settings require. For precipitation, tintalgo is set to nearest, so the offset is set to -5400 seconds, moving the end-of-interval time-stamp back an hour and a half to the middle of the interval. For solar, tintalgo is set to coszen, so the offset is set to -10800 seconds, moving the time-stamp back three hours to the beginning of the interval. For everything else, tintalgo is set to linear, so the offset is set to -5400 seconds, moving the time-stamp back an hour and a half to the middle of the interval.

Normally you wouldn't modify the &datm; settings for this mode. However, if you had data at a frequency other than 3-hourly, you would need to modify the offset and possibly the taxmode. The other two things you might modify are the path to the data (which you can change in the &datm; template; see the appendix on template files) and the domain file for the resolution (which is currently hardwired to f09). For data at a different input resolution you would need to change the domain file in the streams file to one at the resolution the data comes in on.

Conclusion to customizing chapter

We've given extensive details on customizing cases with &clm;: by choosing compsets, by changing &configure; options and interacting with the &clm; "&configure;" and "&buildnml;" scripts, by setting &clm; namelist items, and finally by customizing the &datm; namelist and streams files. In the next chapter we talk about further ways to customize cases with &clm; by creating your own datasets using the tools provided with &clm;.
- - - - diff --git a/doc/UsersGuide/get_Icaselist.pl b/doc/UsersGuide/get_Icaselist.pl deleted file mode 100755 index 6aefc26159..0000000000 --- a/doc/UsersGuide/get_Icaselist.pl +++ /dev/null @@ -1,136 +0,0 @@ -#!/usr/bin/env perl -#----------------------------------------------------------------------------------------------- -# -# get_Icaselist.pl -# -# This utility gets a list of the I cases from the CCSM compset database. -# -#----------------------------------------------------------------------------------------------- - -use strict; -use Cwd; -use English; -use Getopt::Long; -use IO::File; -use IO::Handle; -#----------------------------------------------------------------------------------------------- - -sub usage { - die <autoflush(); - -#----------------------------------------------------------------------------------------------- -my $cwd = getcwd(); # current working directory -my $cfgdir; # absolute pathname of directory that contains this script -$cfgdir = $cwd; - -#----------------------------------------------------------------------------------------------- -# Parse command-line options. -my %opts = ( - ); -GetOptions( - "h|help" => \$opts{'help'}, -) or usage(); - -# Give usage message. -usage() if $opts{'help'}; - -# Check for unparsed argumentss -if (@ARGV) { - print "ERROR: unrecognized arguments: @ARGV\n"; - usage(); -} - -# Check for manditory case input if not just listing valid values - -my %cfg = (); # build configuration - -#----------------------------------------------------------------------------------------------- - -# Check for the configuration definition file. -my $config_def_file = "config_definition.xml"; -my $case_def_dir = "$cfgdir/../../../../../scripts/ccsm_utils/Case.template"; -(-f "$case_def_dir/$config_def_file") or die <<"EOF"; -** Cannot find configuration definition file \"$config_def_file\" in directory - \"$case_def_dir\" ** -EOF - -# Compset definition file. -my $compset_file = 'config_compsets.xml'; -(-f "$case_def_dir/$compset_file") or die <<"EOF"; -** Cannot find compset parameters file \"$compset_file\" in directory - \"$case_def_dir\" ** -EOF - -my $xml_dir = "$cfgdir/../../../../../scripts/ccsm_utils/Tools/perl5lib"; -# The XML::Lite module is required to parse the XML configuration files. 
-(-f "$xml_dir/XML/Lite.pm") or die <<"EOF"; -** Cannot find perl module \"XML/Lite.pm\" in directory - \"$xml_dir\" ** -EOF - - -#----------------------------------------------------------------------------------------------- -my @dirs = ( $cfgdir, $xml_dir, $case_def_dir ); -unshift @INC, @dirs; -require XML::Lite; -require ConfigCase; - -#----------------------------------------------------------------------------------------------- -my $cfg_ref = ConfigCase->new("$case_def_dir/$config_def_file"); -print_compsets( "$case_def_dir/$compset_file" ); - -#----------------------------------------------------------------------------------------------- -# FINNISHED #################################################################################### -#----------------------------------------------------------------------------------------------- - -#------------------------------------------------------------------------------- - -sub print_compsets -{ - # Print all currently supported valid compsets - - my ($compset_file) = @_; - my $xml = XML::Lite->new( $compset_file ); - my $root = $xml->root_element(); - - # Check for valid root node - my $name = $root->get_name(); - $name eq "config_compset" or die - "file $compset_file is not a compset parameters file\n"; - - # Read the compset parameters from $compset_file. - my @e = $xml->elements_by_name( "compset" ); - my %a = (); - my %data; - while ( my $e = shift @e ) { - %a = $e->get_attributes(); - my $sname = $a{'SHORTNAME'}; - if ($a{GRID_MATCH} && exists($data{$sname}) && defined($data{$sname}{'DESC'} && defined($a{'DESC'}) ) ) { - if ( $data{$sname}{'DESC'} =~ /^INVALID:/ ) { - $data{$sname}{'DESC'} = $a{'DESC'}; - } - } elsif ( $a{'SHORTNAME'} =~ /^I/ ) { - $data{$sname}{'NAME'} = $a{'NAME'}; - $data{$sname}{'DESC'} = $a{'DESC'}; - } - } - print "\n"; - foreach my $sname ( sort(keys(%data)) ) { - print "$data{$sname}{'NAME'}" . - "($sname)\n"; - print "$data{$sname}{'DESC'}\n"; - } - print "\n"; -} - diff --git a/doc/UsersGuide/limitLineLen.pl b/doc/UsersGuide/limitLineLen.pl deleted file mode 100755 index 25f1216d06..0000000000 --- a/doc/UsersGuide/limitLineLen.pl +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env perl -# -# Limit the line length for output designed to go into the document. -# -use strict; -use Cwd; -use English; -use IO::File; -use Getopt::Long; -use IO::Handle; -#----------------------------------------------------------------------------------------------- - -# Get the directory name and filename of this script. If the command was -# issued using a relative or absolute path, that path is in $ProgDir. Otherwise assume the -# command was issued from the current working directory. 
- -(my $ProgName = $0) =~ s!(.*)/!!; # name of this script -my $ProgDir = $1; # name of directory containing this script -- may be a - # relative or absolute path, or null if the script - # is in - # the user's PATH -my $nm = "$ProgName::"; # name to use if script dies -my $scrdir; -if ($ProgDir) { - $scrdir = $ProgDir; -} else { - $scrdir = getcwd() -} -my $limitLen = 99; - -sub usage { - my $msg = shift; - - print "ERROR:: $msg\n"; - die < -OPTIONS - -l = Limit line length to this value (default $limitLen) -EOF -} - -sub LengthofwhiteSpaceNearLength { - my $line = shift; - my $leng = shift; - - my $l = $leng; - while( substr( $line, $l, 1 ) !~ /\s|:|,|\// ) { - # First search for white-space before desired length -- and then after - if ( $l <= $leng ) { - $l--; - } else { - $l++; - } - # Once reach beginning of line, go to the desired length+1 and increment - if ( $l < 0 ) { $l = $leng+1; } - # Once reach the very end of the line die as couldn't break it - if ( $l >= length($line) ) { - die "ERROR : went through entire line and did NOT find a place to break it\n"; - } - } - return( $l ); -} - -my %opts = ( limitLen => $limitLen ); - -GetOptions( - "l=s" => \$opts{'limitLen'}, -) or usage(); - -if ( $#ARGV != 0 ) { - &usage( "Wrong number of command line arguments" ); -} - -$limitLen = $opts{'limitLen'}; - -my $inputFile = $ARGV[0]; - -if ( ! -f $inputFile ) { - &usage( "Input file does NOT exist : $inputFile" ); -} - -my $fh = IO::File->new($inputFile, '<') or die "** $nm - can't open input file: $inputFile\n"; - -while (my $line = <$fh>) { - - while( length($line) > $limitLen ) { - print STDERR "Line length over $limitLen\n"; - my $lenlim = &LengthofwhiteSpaceNearLength( $line, $limitLen ); - if ( ($lenlim == length($line)) || $lenlim < 0 ) { - print "Can NOT truncate long line: $line\n"; - die "ERROR : Having trouble breaking a long line\n"; - } - my $substring = substr( $line, 0, $lenlim+1 ); - print "$substring \\ \n"; - my $newline = " " . substr( $line, $lenlim+1, length($line) ); - $line = $newline; - } - print $line; - -} -$fh->close; - - diff --git a/doc/UsersGuide/modelnl/Makefile b/doc/UsersGuide/modelnl/Makefile deleted file mode 100644 index 595a1b3a3e..0000000000 --- a/doc/UsersGuide/modelnl/Makefile +++ /dev/null @@ -1,85 +0,0 @@ -# -# Makefile to create HTML documentation of namelists -# -SCRNLDIR := ../../../../../../scripts/doc/modelnl -VPATH := . $(SCRNLDIR) ../../../../../drv/bld/namelist_files ../../../../../glc/cism/bld/namelist_files \ - ../../../../../atm/datm/bld/namelist_files ../../../../../../scripts/ccsm_utils/Case.template \ - ../../../../../../scripts/ccsm_utils/Machines ../../../../../rof/rtm/bld/namelist_files \ - ../../../bld/namelist_files ../.. 
-SOURCES := namelist_definition_drv.xml namelist_definition_cism.xml namelist_definition.xml \ - namelist_definition_rtm.xm namelist_definition_datm.xml config_definition.xml \ - config_grid.xml config_machines.xml config_compsets.xml ChangeSum -TAGFILE := clmtag.txt - -CWD := $(shell pwd ) -ALLOUT := $(CWD)/clm_nl_drv.html $(CWD)/clm_nl_cism.html $(CWD)/clm_nl_clm.html $(CWD)/clm_nl_rtm.html \ - $(CWD)/clm_nl_datm.html $(CWD)/clm_env_case.html $(CWD)/clm_env_build.html $(CWD)/clm_env_pesetup.html \ - $(CWD)/clm_env_run.html $(CWD)/clm_grid.html $(CWD)/clm_machines.html $(CWD)/clm_compsets.html \ - $(TAGFILE) $(CWD)/index.html - -all: $(ALLOUT) - -debug: - @echo "SOURCES = $(SOURCES)" - @echo "VPATH = $(VPATH)" - @echo "ALLOUT = $(ALLOUT)" - @echo "SCRNLDIR = $(SCRNLDIR)" - @echo "CWD = $(CWD)" - @echo "TAGFILE = $(TAGFILE)" - -.SUFFIXES: -.SUFFIXES: .xml .html .txt - -RM := /bin/rm - -CTAGNAME = $(shell cat $(TAGFILE) ) - - -$(TAGFILE): ChangeSum - head -3 $< | tail -1 | awk '{print $$1}' > $@ - -$(CWD)/index.html: $(TAGFILE) index.cpp - sed 's/CLMTAGNAME/$(CTAGNAME)/' index.cpp > $@ - -$(CWD)/clm_nl_drv.html: namelist_definition_drv.xml - cd $(SCRNLDIR) ; ./nldef2html_drv > $@ - -$(CWD)/clm_nl_cism.html: namelist_definition_cism.xml - cd $(SCRNLDIR) ; ./nldef2html_cism > $@ - -$(CWD)/clm_nl_clm.html: namelist_definition.xml - cd $(SCRNLDIR) ; ./nldef2html_clm > $@ - -$(CWD)/clm_nl_rtm.html: namelist_definition_rtm.xml - cd $(SCRNLDIR) ; ./nldef2html_rtm > $@ - -$(CWD)/clm_nl_datm.html: namelist_definition_datm.xml - cd $(SCRNLDIR) ; ./nldef2html_datm > $@ - -$(CWD)/clm_env_case.html: config_definition.xml - cd $(SCRNLDIR) ; ./xmldef2html_env_case > $@ - -$(CWD)/clm_env_build.html: config_definition.xml - cd $(SCRNLDIR) ; ./xmldef2html_env_build > $@ - -$(CWD)/clm_env_pesetup.html: config_definition.xml - cd $(SCRNLDIR) ; ./xmldef2html_env_pesetup > $@ - -$(CWD)/clm_env_run.html: config_definition.xml - cd $(SCRNLDIR) ; ./xmldef2html_env_run > $@ - -$(CWD)/clm_grid.html: config_grid.xml - cd $(SCRNLDIR) ; ./xmldef2html_grid > $@ - -$(CWD)/clm_machines.html: config_machines.xml - cd $(SCRNLDIR) ; ./xmldef2html_machines > $@ - -$(CWD)/clm_compsets.html: config_compsets.xml - cd $(SCRNLDIR) ; ./xmldef2html_compsets > $@ - -clean: - $(RM) -f $(ALLOUT) - -realclean: clean - $(RM) -f $(TAGFILE) - diff --git a/doc/UsersGuide/modelnl/index.cpp b/doc/UsersGuide/modelnl/index.cpp deleted file mode 100644 index c55aa64159..0000000000 --- a/doc/UsersGuide/modelnl/index.cpp +++ /dev/null @@ -1,45 +0,0 @@ - - - - - -CLM Namelist Definitions (CLMTAGNAME) - - - - - - -
- - -

CLM Tag: CLMTAGNAME

- -

Component Namelist Definitions

- - -

create_newcase files (supported machines, grids, compsets)

- - -

$CASEROOT xml files

- - - - - - diff --git a/doc/UsersGuide/modelnl/showinfo.js b/doc/UsersGuide/modelnl/showinfo.js deleted file mode 100644 index fd8a608472..0000000000 --- a/doc/UsersGuide/modelnl/showinfo.js +++ /dev/null @@ -1,193 +0,0 @@ - function applyFilter(filter_text) { - - // applying a filter hides all standard names not matching filter_text - // if filter_text contains no spaces, it is treated as a regexp - // otherwise, all substrings must occur somewhere - - var is_match = false; - var search_type = 'regexp'; - var search_help_text = false; - var num_matches = 0; - var is_boolean_and = true; - - search_help_text = (document.getElementById('search_help_text').checked); - is_boolean_and = (document.getElementById('logical_operator_and').checked); - - if (filter_text.indexOf(' ') == -1) { - search_type = 'regexp'; - var re = new RegExp(filter_text, 'i') - } - else { - search_type = 'string'; - var string_parts = filter_text.split(' '); - } - - allTRs = document.getElementsByTagName('tr'); - - for (var i = 0; i < allTRs.length; i++) { - curTR = allTRs[i]; - - if (curTR.id != '') { - - if (search_type == 'regexp') { - - is_match = curTR.id.substring(0, curTR.id.length - 3).match(re); - - if (search_help_text) { - - var helpText = document.getElementById(curTR.id.substring(0,curTR.id.length - 3) + '_help').innerHTML; - is_match = is_match || helpText.match(re); - } - } - else { - - if (is_boolean_and) { - var is_name_match = true; - for (var j = 0; j < string_parts.length && is_name_match; j++) { - - if (!curTR.id.match(new RegExp(string_parts[j], 'i'))) { - is_name_match = false; - } - } - } - else { - - var is_name_match = false; - for (var j = 0; j < string_parts.length && !is_name_match; j++) { - - if (curTR.id.substring(0, curTR.id.length - 3).match(new RegExp(string_parts[j], 'i'))) { - is_name_match = true; - } - } - } - - is_match = is_name_match; - - if (search_help_text) { - var helpText = document.getElementById(curTR.id.substring(0,curTR.id.length - 3) + '_help').innerHTML; - - if (is_boolean_and) { - var is_help_match = true; - - for (var j = 0; j < string_parts.length && is_help_match; j++) { - - if (!helpText.match(new RegExp(string_parts[j], 'i'))) { - is_help_match = false; - } - } - } - else { - - var is_help_match = false; - - for (var j = 0; j < string_parts.length && !is_help_match; j++) { - - if (helpText.match(new RegExp(string_parts[j], 'i'))) { - is_help_match = true; - } - } - } - - is_match = is_match || is_help_match; - - } - } - - if (!is_match) { - curTR.style.display = 'none'; - } - else { - num_matches++; - curTR.style.display = ''; - if (search_help_text) { - showHelp(curTR.id.substring(0,curTR.id.length - 3)); - } - else { - hideHelp(curTR.id.substring(0,curTR.id.length - 3)); - } - } - } - } - - var filter_matches = document.getElementById('filter_matches'); - var filter_matches_num = document.getElementById('filter_matches_num'); - var filter_matches_query = document.getElementById('filter_matches_query'); - - if (filter_text != '') { - filter_matches.style.visibility = 'visible'; - filter_matches_num.innerHTML = num_matches; - filter_matches_query.innerHTML = filter_text; - } - else { - filter_matches.style.visibility = 'hidden'; - } - - } // end function applyFilter() - - function clearFilter() { - - allTRs = document.getElementsByTagName('tr'); - - for (var i = 0; i < allTRs.length; i++) { - curTR = allTRs[i]; - if (curTR.id != '') { - curTR.style.display = ''; - hideHelp(curTR.id.substring(0,curTR.id.length - 3)); - - } - } - - var filter_matches = 
document.getElementById('filter_matches'); - filter_matches.style.visibility = 'hidden'; - - document.getElementById('filter_text').value = ''; - } - - function toggleHelp(standard_name) { - - // check for the existence of the help "tr" object for this standard_name - - var helpDiv = document.getElementById(standard_name + '_help'); - - if (helpDiv) { - - if (helpDiv.style.display != 'none') { - - helpDiv.style.display = 'none'; - - curArrow = document.getElementById(standard_name + '_arrow'); - curArrow.src = "./images/arrow_right.gif"; - } - else { - helpDiv.style.display = ''; - - curArrow = document.getElementById(standard_name + '_arrow'); - curArrow.src = "./images/arrow_down.gif"; - } - } - } - - - function showHelp(standard_name) { - - var helpDiv = document.getElementById(standard_name + '_help'); - - if (helpDiv) { - - helpDiv.style.display = ''; - curArrow = document.getElementById(standard_name + '_arrow'); - curArrow.src = "./images/arrow_down.gif"; - } - } - - function hideHelp(standard_name) { - - var helpDiv = document.getElementById(standard_name + '_help'); - - if (helpDiv) { - helpDiv.style.display = 'none'; - curArrow = document.getElementById(standard_name + '_arrow'); - curArrow.src = "./images/arrow_right.gif"; - } - } - diff --git a/doc/UsersGuide/modelnl/xmldef2html_compsets b/doc/UsersGuide/modelnl/xmldef2html_compsets deleted file mode 100755 index 2659beed17..0000000000 --- a/doc/UsersGuide/modelnl/xmldef2html_compsets +++ /dev/null @@ -1,162 +0,0 @@ -#!/usr/bin/env perl - -use strict; - -if ( $#ARGV != 0 ) { - die "Wrong number of input arguments -- should just enter one filename\n"; -} -my $infilename = $ARGV[0]; -if ( ! -f $infilename ) { - die "Input file: $infilename does NOT exist\n"; -} - -my @dirs = ('../../../../../../scripts/ccsm_utils/Tools/per5lib', '../../../../../../scripts//ccsm_utils/Tools/perl5lib/Build'); -unshift @INC, @dirs; -require XML::Lite; -use lib "../../../../../../scripts/ccsm_utils/Tools/perl5lib"; - -my $image_dir = "./images"; - -print <<"END_of_Start"; - - - - - - - CESM Component Models Namelist Definitions - - - - - - -

Search or Browse supported component sets

-

-This page contains the complete list of config_grid.xml variables available. They are grouped -by categories designed to aid browsing. Clicking on the name of a variable will display descriptive -information. If search terms are entered in the text box below, the list will be condensed to contain -only matched variables. -

- -
- - - - - - -
- - - -
- - - (separate search terms with spaces) -
- -
-
- - - -END_of_Start - -my $xml = XML::Lite->new( $infilename ); -my $root = $xml->root_element(); - -# Check for valid root node -my $name = $root->get_name(); -$name eq "config_compset" or die - "file $infilename is not a compset definition file\n"; - -# Print table -print_start_table("config_compsets.xml variables"); -my @e = $xml->elements_by_name( "compset" ); -my %a = (); -while ( my $e = shift @e ) { - %a = $e->get_attributes(); - - if ($a{'NAME'} =~ /I_/ ) { - my $var = $a{'NAME'}; - my $doc = "Description: $a{DESC} \n"; - my $grp = "$a{SHORTNAME}"; - print_row($var, $doc, $grp); - } -} -print_end_table(); - -# Finish -print <<"END_of_html"; - - -END_of_html - -#-------------------------------------------------------------------------------------------- - -sub print_start_table { - my $hdr = shift; - -print <<"START_table"; -

$hdr

- - - -START_table -} - -#-------------------------------------------------------------------------------------------- - -sub print_row { - - my $name = shift; - my $doc = shift; - my $grp = shift; - -print <<"END_of_row"; - - - - -END_of_row -} - -#-------------------------------------------------------------------------------------------- - -sub print_end_table { - -print <<"END_table"; -
Compset NameShort Name
- - - $name - - - $grp
END_table
}

#--------------------------------------------------------------------------------------------

diff --git a/doc/UsersGuide/pergro.jpg b/doc/UsersGuide/pergro.jpg
deleted file mode 100644
index a0cb81e046..0000000000
Binary files a/doc/UsersGuide/pergro.jpg and /dev/null differ
diff --git a/doc/UsersGuide/preface.xml b/doc/UsersGuide/preface.xml
deleted file mode 100644
index ed063ce774..0000000000
--- a/doc/UsersGuide/preface.xml
+++ /dev/null
@@ -1,1419 +0,0 @@

$Id$

Acknowledgments

I want to acknowledge all of the people who helped review or edit the model documentation: David Lawrence, Samuel Levis, Keith Oleson, and Sean Swenson. Thank you for your help in catching errors and making the document more understandable and readable; our readers thank you as well, as it is now much easier for them to digest. Any mistakes or errors are all mine. If you run across one of those errors, please let us know by following the instructions for reporting problems. I also want to thank Sheri Mickelson for her work in doing perturbation analysis on bluefire and intrepid, which was used in our initial versions of this User's Guide. We also want to thank the original authors of &ptclm; -- Daniel M. Ricciuto, Dali Wang, Peter E. Thornton, Wilfred M. Post, and R. Quinn Thomas -- for providing a nice addition to the &cesm; effort. We also want to thank the folks at the University of Michigan Biological Station (US-UMB) who allowed us to use their Fluxnet station data and import it into our inputdata repository, especially Gil Bohrer, the PI of record for this site (see the single-point chapter for permission information on using this data).

Introduction

The Community Land Model (&clmrel;) is the latest in a series of global land models developed by the &cesm; Land Model Working Group (LMWG) and maintained at the National Center for Atmospheric Research (&ncar;). This guide is intended to instruct both the novice and experienced user on running &clm;. It pertains to the latest version, &clmrel;, available for download from the public release subversion repository as a part of &cesmrel;. Documentation may be different if you are using an older version; you should either update to the latest version or use the documentation inside your own source tree. There is information in the ChangeLog file regarding the changes from previous versions of &cesm;.

The novice user should read the introductory material in detail before beginning work, while the expert user should read the quickstart and customization chapters and then use the more detailed chapters as reference. Before novice users go on to the more technical problems covered in the later chapters, they should know the material covered in the customization chapter and be able to replicate some of the examples given there.

All users should read the sections on document conventions and on getting help, to understand the document conventions and the various ways of getting help on using &clm4;. Users should also read the section on validation to see if their planned use of the model is something that has been scientifically validated and well tested. Users that are NOT using &ncar; machines, or our list of well-tested machines, should also read the section on required UNIX utilities to make sure they have everything they need on the system where they want to do their work.

Introduction to the &clm4; User's Guide
What is in here anyway?

Here in the introduction we first give a simple guide to understanding the document conventions.
The next section describes the differences between &clmrel; and &clm40; (for each &cesm; release version up to &cesmrel;), as well as between &clm40; and &clm35;, from both a scientific and a software engineering point of view. It also talks about differences in the configuration, namelist, and history fields. The next section is for users that are already experts in using &clm; and gives a quickstart guide to the bare details on how to use &clm4;. The next section tells you about what has been extensively tested and scientifically validated (and, maybe more importantly, what has NOT).  lists the UNIX utilities required to use &clm4; and is important if you are running on non-&ncar; machines, generic local machines, or machines NOT as well tested by us at &ncar;. Next we have  to detail some of the best practices for using &clm4; for science. The last introductory section is , which lists different resources for getting help with &cesm1; and &clm4;.

 goes into detail on how to set up and run simulations with &clm4;, and especially how to customize cases. Details of &configure; modes and &buildnml; options, as well as namelist options, are given in this chapter.

 gives instructions on the &clm4; tools for creating input datasets for use by &clm;, for the expert user. There's an overview of what each tool does, and some general notes on how to build the FORTRAN tools. Then each tool is described in detail, along with different ways in which the tool might be used. The last section of this chapter describes how to customize datasets for observational sites, for very savvy expert users.

As a follow-up to the tools chapter,  tells how to add files to the XML database for &buildnml; to use. This is important if you want to use the XML database to automatically select user-created input files that you have created when you set up new cases with &clm;.

In , again for the expert user, we give details on how to do some particularly difficult special cases. For example, we give the protocol for spinning up both the &clmcn; model and &clm; with dynamic vegetation active (CNDV). We give instructions to do a spinup case from a previous case with Coupler history output for atmospheric forcing. We also give instructions on running the prognostic crop model and its irrigation option. We also review how to validate a port to a new machine using the perturbation error growth technique. Lastly we tell the user how to use the DATM model to send historical &CO2; data to &clm;.

 outlines how to do single-point or regional simulations using &clm4;. This is useful either to compare &clm; simulations with point observational stations, such as tower sites (which might include your own atmospheric forcing), or to do quick simulations with &clm;, for example to test a new parameterization. Several different ways to perform single-point simulations are given, ranging from simple &PTSMODE; to more complex cases where you create all your own datasets, tying into  and also adding the files into the &buildnml; XML database. After this chapter, the  chapter outlines how to use the &ptclm; Python script to help you run single-point simulations.

Finally,  gives some guidance on trouble-shooting problems when using &clm4;. It doesn't cover all possible problems with &clm;, but gives you some guidelines for things that can be done for some common problems.
In the appendices we talk about some issues that are useful for advanced users and developers of &clm;. In  we give some basic background to the &clm; developer on how to edit the models/lnd/clm/bld/clm.cpl7.template. This is a very difficult exercise and we don't recommend it for any but the most advanced users of &clm; who are also experts in UNIX and UNIX scripting.

In  we go over how to run the script runinit_ibm.csh that will interpolate the standard resolution initial condition dataset to several other resolutions at once. It also runs &clm; to create template files, as well as doing the interpolation using interpinic. In general this is only something that a developer would want to do. Most users will only want to interpolate for a few specific resolutions.

In  we go over the automated testing scripts for validating that &clm; is working correctly. The test scripts run many different configurations and options with &clm;, making sure that they work, as well as doing automated testing to verify restarts are working correctly, and testing at many different resolutions. In general this is an activity important only for a developer of &clm;, but it could also be used by users who are doing extensive code modifications and want to ensure that the model continues to work correctly.

Finally, in  we give instructions on how to build the documentation associated with &clm; (i.e. how to build this document). This document is included in every &clm; distribution and can be built so that you can view a local copy rather than having to go to the &cesm; website. This also could be useful for developers who need to update the documentation due to changes they have made.

Important Notes and Best Practices for Usage of &clm4;

When running with CN, it is critical to begin with initial conditions that are provided with the release, or to spin the model up following the CN spinup procedure, before conducting scientific runs (see ). Simulations without a proper spinup will effectively be starting from an unvegetated world. See  for information on how to provide initial conditions for your simulation.

Initial condition files are provided for fully coupled BCN and offline ICN cases for 1850 and 2000 at 1deg, 2deg, and T31 resolutions. There's also an initial condition file for ICN with the prognostic crop model for 2000 at 2deg resolution, and one with &clmsp; for 2000 at 2deg resolution. We also have initial conditions for offline CNDV for 1850. And there are interpolated datasets at 4x5 and 10x15 resolution for 1850. The 1850 initial condition files are in 'reasonable' equilibrium. The 2000 initial condition files represent the model state for the year 2000 and have been taken from transient simulations. Therefore, by design, the year 2000 initial condition files do not represent an equilibrium state. Note also that spinning the 2000 initial conditions out to equilibrium will not reflect the best estimate of the real carbon/nitrogen state for the year 2000.

Users can generate initial condition files at different resolutions by using the &clm; tool interpinic to interpolate from one of the provided resolutions to the resolution of interest. Interpolated initial condition files may no longer be in 'reasonable' equilibrium.

Aerosol deposition is a required field to &clm4;, sent from the atmosphere model. Simulations without aerosol deposition will exhibit unreasonably high snow albedos.
The aerosol deposition is sent to &clm; from the atmospheric model (either CAM or &datm;). When running with prescribed aerosols, the atmosphere model will interpolate the aerosols from 2-degree resolution to the resolution the atmosphere model is running at.

How to Use This Document
Conventions used in the document for code and commands

This section provides the details of using &clm; with the &cesm; modeling system. Links to descriptions and definitions have been provided in the code below. We use the same conventions used in the &cesm; documentation, as outlined below.

Throughout the document this style is used to indicate shell commands and options, fragments of code, namelist variables, etc. Where examples from an interactive shell session are presented, lines starting with > indicate the shell prompt. A backslash "\" at the end of a line means the line continues onto the next one (as it does in a standard UNIX shell). Note that $EDITOR is used to refer to the text editor of your choice. $EDITOR is a standard UNIX environment variable and should be set on most UNIX systems. Comment lines are signaled with a "#" sign, which is the standard UNIX comment sign as well. $CSMDATA is used to denote the path to the inputdata directory for your &cesm; data.

> This is a shell prompt with commands \
that continues to the following line.
> $EDITOR filename # means you are using a text editor to edit "filename"
# This is a comment line

What is new with &clmrel; since previous public releases?

In this section we list the updates that have occurred to &clm4; since previous public releases. In the first sections we describe changes in &clmrel; since the &ccsm4; release, and in the last one we describe changes from &clm35; to the &clm40; release. Note that the changes in the last section do NOT include the more recent changes given in the first section, but only list the changes from &clm35; to the &clm40; release that was part of the &ccsm4; public release. We will describe both the changes in the science in the model as well as the software engineering changes. Software engineering changes include the configure and namelist changes, as well as the new history fields.

What is new with &clmrel; since the December 8th, 2010 &cesm102; release?

What is new with &clmrel; Science since &clmcesm102;?

A prognostic crop model option was added (based on Agro-IBIS) from work by Samuel Levis. The crop model adds four new vegetation types, each on its own separate column: soybean, winter and spring temperate cereals, and corn. Winter cereal was added as a PFT type, but doesn't exist in the input datasets; only spring cereal is used. Winter cereal also has NOT been scientifically validated or tested. The model manages these by modeling both planting and harvesting. See  for an example of running with it.

An irrigation model was added from work by Samuel Levis and Bill Sacks. This model takes water from runoff and adds it to the crop pfts for areas equipped for irrigation. See  for an example of running with it. Please note that irrigation is only applied to the generic crop and does NOT work with the prognostic crop model active.

What is new with &clmrel; Software since &clmcesm102;?

Since &clmcesm102; all Input/Output uses &pio; (the Parallel Input/Output package). Restart history files are now &netcdf;. Input and output files can be read/written in parallel using &pio;.
We removed a list of old CPP defines and removed the old misc/preproc.h files. Also, a new tool for working with single-point sites was added into the &cesm; scripts: the Python tool &ptclm;. We have a complete chapter on its use.

New configuration options:

-crop
-noio

Configuration options removed:

-dust
-progsslt

New build-namelist options:

-co2_ppmv
-rtm_res
-rtm_tstep

The new precedence for build-namelist options is:

1. Values set on the command-line using the -namelist option (&CLMNAMELIST;).
2. Values read from the file specified by -infile (&usernlclm; file).
3. Datasets from the -clm_usr_name option (&CLMUSRDAT;).
4. Values set from a use-case scenario, e.g., -use_case (&CLMUSECASE;).
5. Values from the namelist defaults file.

Namelist options renamed:

carbon_only => suplnitro (can be set to NONE or ALL)

Namelist options removed:

scaled_harvest
hist_crtinic
hist_pioflag
ncd_lowmem2d
ncd_pio_def
ncd_pio_UseRearranger
ncd_pio_UseBoxRearr
ncd_pio_SerialCDF
ncd_pio_IODOF_rootonly
ncd_pio_DebugLevel
ncd_pio_num_iotasks

New history fields:

A5TMIN    5-day running mean of min 2-m temperature (K)
A10TMIN   10-day running mean of min 2-m temperature (K)
GDD0      Growing degree days base 0C from planting (ddays)
GDD8      Growing degree days base 8C from planting (ddays)
GDD10     Growing degree days base 10C from planting (ddays)
GDD020    Twenty year average of growing degree days base 0C from planting (ddays)
GDD820    Twenty year average of growing degree days base 8C from planting (ddays)
GDD1020   Twenty year average of growing degree days base 10C from planting (ddays)
GDDPLANT  Accumulated growing degree days past planting date for crop (ddays)
GDDHARV   Growing degree days (gdd) needed to harvest (ddays)
GDDTSOI   Growing degree-days from planting (top two soil layers) (ddays)
QIRRIG    water added through irrigation (mm/s)

SNOWLIQ and SNOWICE changed from average to instantaneous output.

What was new with &clm4014; (in &cesm102;) since the September 17th, 2010 &cesm101; release?

Since &clm4010; in the &cesm101; release there were several developments made to &clmrel;. Several new namelist items and a few new history fields were added. There were also some updates for running the model in single-point mode.

What was new with &clm4014; Science since &clm4010;?

A long simulation at the coarse resolution of T31 (typically used for paleo-climate studies) was done and a spun-up initial condition file was provided for this resolution (also, by default, the namelist variable ice_runoff is turned off for T31). A new surface dataset and transient land-cover dataset were also provided for half-degree resolution.

What was new with &clm4014; Software since &clm4010;?

New configuration options:

sitespf_pt

sitespf_pt is used for single-point/regional mode and is set to the site-name that will be used (see the config_definition.xml for the list of valid options).

Configuration options that were renamed:

prog_seasalt => progsslt

Namelist items removed:

faerdep
fndepdat
fndepdyn
use_ndepstream

Nitrogen deposition datasets are now only entered through the ndepdyn_nml namelist (removing fndepdat, fndepdyn, and use_ndepstream).
Aerosol deposition is now a required input from the atmosphere model, hence faerdep is removed.

New history fields:

U10       10-m wind (m/s)
U10_DUST  10-m wind for dust model (m/s)
VA        atmospheric wind speed plus convective velocity (m/s)
VOLR      RTM storage: LIQ (m3)
VOLR_ICE  RTM storage: ICE (m3)

What was new with &clm4010; (in &cesm101;) since the April 1st, 2010 &ccsm4; release?

From &clm40; in the &ccsm4; release to &clm4010; there were several developments made to &clm;. A glacier multiple elevation class option was added that allows the use of &clm4; with a glacier land ice model, the Community Ice Sheet Model (CISM). A bug-fix for the snow hydrology was added. Several new namelist items and a few new history fields were added. Also added was the capability of reading aerosol and nitrogen deposition from stream files at one resolution, regridded on the fly, rather than with datasets at the model resolution. This was important for higher resolutions, so that large datasets do not have to be created before running the model, nor are datasets for every resolution required.

What was new with &clm4010; Science since &ccsm4;?

In general, snow layers should not be thinner than

dzmin = wice/rhoice + wliq/rholiq

If dz < dzmin, then the value of "void" computed in subroutine SnowCompaction is negative, which is unphysical. This doesn't cause problems with the compaction itself, but results in unrealistic values of vol_ice, vol_liq, and eff_porosity in subroutine SnowWater. We can have vol_ice = 1 and vol_liq = 0 even when liquid is present, which cuts off the runoff (qout) from the lowest snow layer. Liquid water then accumulates in the snow column without draining, which leads to further problems and eventually a code crash.

The solution to this problem was to adjust the layer thickness dz for any water+ice content changes in excess of the previous layer thickness, e.g.,

dz(c,j) = max(dz(c,j), h2osoi_liq(c,j)/denh2o + h2osoi_ice(c,j)/denice)

at appropriate steps in the snow hydrology subroutines.

Snow hydrology bug fix.
Add multiple elevation class option for glaciers so they can interact with the land ice sheet model.

What was new with &clm4010; Software since &ccsm4;?
New configuration options:

glc_nec

glc_nec can be 1, 3, 5, or 10 and MUST match the number on the input surface dataset; the elevation classes themselves are read from the surface dataset.

New namelist items:

carbon_only
create_glacier_mec_landunit
glc_dyntopo
ice_runoff
ndepmapalgo
scaled_harvest

carbon_only = If true, and the CLMCN carbon-nitrogen model is on, Nitrogen is unlimited rather than prognosed, and vegetation will be over-productive (replaces the supplemental Nitrogen #ifdef)

create_glacier_mec_landunit (= T when these landunits are created; F by default)

glc_dyntopo (= T if &clm; topography changes dynamically; currently F) (NOT fully implemented yet)

ice_runoff = If true, river runoff will be split up into liquid and ice streams; otherwise ice runoff will be zero and all runoff directed to the liquid stream

ndepmapalgo = Mapping method from the Nitrogen deposition input file to the model resolution (can be bilinear, nn, nnoni, nnonj, spval, copy; bilinear by default)

scaled_harvest = If true, harvesting will be scaled according to coefficients determined by Johann Feddema, 2009

New history fields:

aais_area  Antarctic ice area (km^2)
aais_mask  Antarctic mask (unitless)
gris_area  Greenland ice area (km^2)
gris_mask  Greenland mask (unitless)
QICE       ice growth/melt (mm/s)
QICEYR     ice growth/melt (mm/s)
QTOPSOIL   water input to surface (mm/s)
VOLR       RTM storage: LIQ (m3)
VOLR_ICE   RTM storage: ICE (m3)

What was new with &clm40; since &clm35;?

From &clm35; to &clm40; there were advances in both the science and the software infrastructure. There were also new configure and namelist options, as well as new history fields. In this section we describe each of these changes in turn.

What was new with &clm40; Science?

The following aspects are changes to the science in &clm40; since &clm35;.

Biogeophysics and Hydrology

Changes to &clm40; beyond &clm35; (Oleson et al., 2008a; Stockli et al., 2008) include updates throughout the model. The hydrology scheme has been modified with a revised numerical solution of the Richards equation (Zeng and Decker, 2009; Decker and Zeng, 2009), and a revised soil evaporation parameterization that removes the soil resistance term introduced in &clm35; and replaces it with a so-called &Bgr; formulation, as well as accounting for the role of litter and within-canopy stability (Sakaguchi and Zeng, 2009). &clm4; also includes a representation of the thermal and hydraulic properties of organic soil that operates in conjunction with the mineral soil properties (Lawrence and Slater, 2008). The ground column has been extended to ~50-m depth by adding five additional hydrologically inactive ground layers (making a total of 15 ground layers: 10 soil layers and 5 bedrock layers; Lawrence et al., 2008). An urban landunit and associated urban canyon model (&clmu;) has been added, which permits the study of urban climate and urban heat island effects (Oleson et al., 2008b).

Snow Model

The snow model is significantly modified via incorporation of SNICAR (SNow and Ice Aerosol Radiation), which represents the effect of aerosol deposition (e.g. black and organic carbon and dust) on albedo, introduces a grain-size dependent snow aging parameterization, and permits vertically resolved snowpack heating (Flanner and Zender, 2005; Flanner and Zender, 2006; Flanner et al., 2007).
The new snow model also includes a new density-dependent snow cover fraction parameterization (Niu and Yang, 2007), a revised snow burial fraction over short vegetation (Wang and Zeng, 2009), and corrections to snow compaction (Lawrence and Slater, 2009).

Surface Datasets

The PFT distribution is as in Lawrence and Chase (2007), except that a new cropping dataset is used (Ramankutty et al., 2008) and a grass PFT restriction has been put in place to reduce a high grass PFT bias in forested regions by replacing the herbaceous fraction with low trees rather than grass. Grass and crop PFT optical properties have been adjusted according to values presented in Asner et al. (1998), resulting in significantly reduced albedo biases. Soil colors have been re-derived according to the new PFT distribution.

Biogeochemistry

The model is extended with a carbon-nitrogen biogeochemical model (Thornton et al., 2007; Thornton et al., 2009; Randerson et al., 2009), which is referred to as &clmcn;. CN is based on the terrestrial biogeochemistry Biome-BGC model with a prognostic carbon and nitrogen cycle (Thornton et al., 2002; Thornton and Rosenbloom, 2005). &clmcn; is prognostic with respect to carbon and nitrogen state variables in the vegetation, litter, and soil organic matter. Vegetation phenology and canopy heights are also prognostic. A detailed description of the biogeochemical component can be found in Thornton et al. (2007). Note that &clm40; can be run with either prescribed satellite phenology (&clmsp;) or with prognostic phenology provided by the carbon-nitrogen cycle model (&clmcn;). Additionally, a transient land cover and land use change capability, including wood harvest, has been introduced that enables the evaluation of the impact of historic and future land cover and land use change on energy, water, and momentum fluxes, as well as carbon and nitrogen fluxes. The dynamic global vegetation model in &clm3; has been revised such that the carbon dynamics (e.g. productivity, decomposition, phenology, allocation, etc.) are controlled by CN and only the dynamic vegetation biogeography (competition) aspect of the &clm3; DGVM is retained. The biogenic volatile organic compounds (BVOC) model that was available in &clm3; has been replaced with the MEGAN BVOC model (Heald et al. 2008).

Miscellaneous Changes

Several other minor changes have been incorporated, including a change to the atmospheric reference height so that it is the height above zo+d for all surface types. The convergence of canopy roughness length zo and displacement height d to bare soil values as the above-ground biomass, or the sum of leaf and stem area indices, goes to zero is ensured (Zeng and Wang, 2007). Several corrections have been made to the way the offline forcing data is interpreted. The main change is a vastly improved and smooth diurnal cycle of incoming solar radiation that conserves the total incoming solar radiation from the forcing dataset. Additionally, in offline mode, rather than partitioning incoming solar radiation into a constant 70%/30% direct vs diffuse split, it is partitioned according to empirical equations that are a function of total solar radiation. Finally, to improve global energy conservation in fully coupled simulations, runoff is split into separate liquid and ice water streams that are passed separately to the ocean. Input to the ice water stream comes from excess snowfall in snow-capped regions.
Summary of Science Changes

Taken together, these augmentations to &clm35; in &clm40; result in improved soil moisture dynamics that lead to higher soil moisture variability and drier soils. Excessively wet and unvarying soil moisture was recognized as a deficiency in &clm35; (Oleson et al. 2008a, Decker and Zeng, 2009). The revised model also simulates, on average, higher snow cover, cooler soil temperatures in organic-rich soils, greater global river discharge, lower albedos over forests and grasslands, and higher transition-season albedos in snow covered regions, all of which are improvements compared to &clm35;.

What is new with &clm40; Software Infrastructure?

The following aspects are changes to the software infrastructure in &clm40; since &clm35;.

Update to cpl7 and scripts.
Remove offline and cpl6 modes.
Remove support for the CASA model.
Update to the datm8 atmospheric data model.
Add gx3v7 land mask for T31 and fv-4x5 horizontal resolutions.
Add gx1v6 land mask for f05, f09, and f19 horizontal resolutions.
Add tx1v1 land mask and 1.9x2.5_tx1v1 horizontal resolution.
Add in 2.5x3.33 horizontal resolution.
Add in T62 horizontal resolution so &clm; can run at the same resolution as the input &datm; data.
Allow first history tape to be 1D.
Add ability to use your own version of input datasets with the &CLMUSRDAT; variable.
Add a script to extract out regional datasets.
New &buildnml; system with an XML file describing all namelist items.
Add glacier_mec use-case and stub glacier model.
Make the default maxpatch_pft=numpft+1 instead of 4.
Only output static 3D fields on the first h0 history file to save space.
Add new fields for VOC (Volatile Organic Compounds) on surface datasets, needed for the new MEGAN VOC model.
Add multiple elevation class option for glaciers in the mksurfdata tool (NOT used in &clm; yet).
Add ascale field to the land model in support of the model running on its own grid.

What are The New Configuration Options in &clm40;?

Changes made to the build system:

Change directory structure to match &ccsm;.
Add BGP target.
Add choice between ESMF and MCT frameworks.
Start removing #ifdefs and directives that supported the Cray-X1 Phoenix, as it is now decommissioned.
Make the default maxpatch_pft=numpft+1 instead of 4 for all configurations.
By default turn on CLAMP when either CN or CASA is enabled.
New SNICAR_FRC, CARBON_AERO, and C13 CPP ifdef tokens.

New options added to &configure;:

Option               Description
-comp_intf <name>    Component interface to use (ESMF or MCT) (default MCT)
-nofire              Turn off wildfires for the bgc setting of CN (default includes fire for CN)
-pio <name>          Switch that enables building with the Parallel I/O library [on | off] (default is on)
-snicar_frc <name>   Turn on the SNICAR radiative forcing calculation [on | off] (default is off)

More information on options to &clm; &configure; is given in .

What are The New Namelist Options in &clm40;?

&buildnml; now checks the validity of the namelist you generate by looking at data in the namelist_definition.xml file. In order to add new namelist items you need to change the code and also edit this file (e.g. for a namelist option required for your research project that is not currently an option in &clm40;).
To view information on the namelist, view the file models/lnd/clm/bld/namelist_files/namelist_definition.xml in a browser and you'll see the names, type, description, and valid_values for all namelist variables.

Changes to &buildnml;:

Allow the simulation year entered to include ranges of years (e.g. 1850-2000).
Remove the cam_hist_case option.
Make sure options ONLY used for stand-alone testing have a "drv_" or "datm_" prefix in them, and list these options all together and last when asking for help from &buildnml;.

New option to &buildnml;:

-clm_usr_name "name"   Dataset resolution/descriptor for personal datasets.
                       Default: not used
                       Example: 1x1pt_boulderCO_c090722 to describe location, number of pts, and date files created

New list options to &buildnml;:

cd models/lnd/clm/bld
./&buildnml; -res list         # List valid resolutions
./&buildnml; -mask list        # List valid land-masks
./&buildnml; -sim_year list    # List valid simulation years and simulation year ranges
./&buildnml; -clm_demand list  # List namelist variables including those you could demand to be set.
./&buildnml; -use_case list    # List valid use-cases

New use-cases for &buildnml;:

   1850_control = Conditions to simulate 1850 land-use
   2000_control = Conditions to simulate 2000 land-use
20thC_transient = Simulate transient land-use, aerosol and Nitrogen deposition from 1850 to 2005

New namelist items:

urban_hac = OFF, ON or ON_WASTEHEAT (default OFF) Flag for urban Heating and Air-Conditioning
    OFF = Building internal temperature is un-regulated.
    ON = Building internal temperature is bounded to a reasonable range.
    ON_WASTEHEAT = Building internal temperature is bounded and the resultant waste heat is given off.
urban_traffic = .true. or .false. Flag to include an additional multiplicative factor of urban traffic on the sensible heat flux. (default .false.)
fsnowoptics = filename of the file for snow/aerosol optical properties (required)
fsnowaging = filename of the file for snow aging parameters (required)

More information on the &buildnml; options is given in  and in &CLMBLDNML;.

What are The New History Fields?

New history variables: (note watt vs. W in units, 26 vs. 76)
Name                    Long-name                                                                          Units       Active/Inactive

BCDEP                   total BC deposition (dry+wet) from atmosphere                                      kg/m^2/s
BIOGENCO                biogenic CO flux                                                                   uGC/M2/H
C13_PRODUCT_CLOSS       C13 total carbon loss from wood product pools                                      gC13/m^2/s
DSTDEP                  total dust deposition (dry+wet) from atmosphere                                    kg/m^2/s
EFLX_DYNBAL             dynamic land cover change conversion energy flux                                   W/m^2
FGR12                   heat flux between soil layers 1 and 2                                              watt/m^2
FSAT                    fractional area with water table at surface                                        unitless
FSH_NODYNLNDUSE         sensible heat flux not including correction for land use change                    watt/m^2
GC_HEAT1                initial gridcell total heat content                                                J/m^2
GC_HEAT2                post land cover change total heat content                                          J/m^2       inactive
GC_ICE1                 initial gridcell total ice content                                                 mm/s
GC_ICE2                 post land cover change total ice content                                           mm/s        inactive
GC_LIQ1                 initial gridcell total liq content                                                 mm
GC_LIQ2                 post land cover change total liq content                                           mm          inactive
H2OSNO_TOP              mass of snow in top snow layer                                                     kg
HEAT_FROM_AC            sensible heat flux put into canyon due to heat removed from air conditioning       watt/m^2
HK                      hydraulic conductivity                                                             mm/s        inactive
ISOPRENE                isoprene flux                                                                      uGC/M2/H
LAND_USE_FLUX           total C emitted from land cover conversion and wood product pools                  gC/m^2/s
LAND_UPTAKE             NEE minus LAND_USE_FLUX, negative for uptake                                       gC/m^2/s
LWup                    upwelling longwave radiation                                                       watt/m^2    inactive
MONOTERP                monoterpene flux                                                                   uGC/M2/H
NBP                     net biome production, includes fire, landuse, and harvest flux, positive for sink  gC/m^2/s
OCDEP                   total OC deposition (dry+wet) from atmosphere                                      kg/m^2/s
OVOC                    other VOC flux                                                                     uGC/M2/H
ORVOC                   other reactive VOC flux                                                            uGC/M2/H
PBOT                    atmospheric pressure                                                               Pa
PCO2                    atmospheric partial pressure of &CO2;                                              Pa
PRODUCT_CLOSS           total carbon loss from wood product pools                                          gC/m^2/s
PRODUCT_NLOSS           total N loss from wood product pools                                               gN/m^2/s
Qair                    atmospheric specific humidity                                                      kg/kg       inactive
Qanth                   anthropogenic heat flux                                                            watt/m^2    inactive
Qtau                    momentum flux                                                                      kg/m/s^2
QFLX_LIQ_DYNBAL         liq dynamic land cover change conversion runoff flux                               mm/s
QFLX_ICE_DYNBAL         ice dynamic land cover change conversion runoff flux                               mm/s
QRUNOFF_NODYNLNDUSE     total liquid runoff not including correction for land use change (does not include QSNWCPICE)  mm/s
QSNWCPICE               excess snowfall due to snow capping                                                mm/s
QSNWCPICE_NODYNLNDUSE   excess snowfall due to snow capping not including correction for land use change  mm/s
QSNWCPLIQ               excess rainfall due to snow capping                                                mm/s        inactive
SMP                     soil matric potential                                                              mm          inactive
SNOAERFRC2L             surface forcing of all aerosols in snow, averaged only when snow is present (land) watt/m^2
SNOAERFRCL              surface forcing of all aerosols in snow (land)                                     watt/m^2
SNOBCFRCL               surface forcing of BC in snow (land)                                               watt/m^2
SNOBCMCL                mass of BC in snow column                                                          kg/m2
SNOBCMSL                mass of BC in top snow layer                                                       kg/m2
SNOdTdzL                top snow layer temperature gradient (land)                                         K/m
SNODSTFRC2L             surface forcing of dust in snow, averaged only when snow is present (land)         watt/m^2
SNODSTFRCL              surface forcing of dust in snow (land)                                             watt/m^2
SNODSTMCL               mass of dust in snow column                                                        kg/m2
SNODSTMSL               mass of dust in top snow layer                                                     kg/m2
SNOFSRND                direct nir reflected solar radiation from snow                                     watt/m^2    inactive
SNOFSRNI                diffuse nir reflected solar radiation from snow                                    watt/m^2    inactive
SNOFSRVD                direct vis reflected solar radiation from snow                                     watt/m^2    inactive
SNOFSRVI                diffuse vis reflected solar radiation from snow                                    watt/m^2    inactive
SNOFSDSND               direct nir incident solar radiation on snow                                        watt/m^2    inactive
SNOFSDSNI               diffuse nir incident solar radiation on snow                                       watt/m^2    inactive
SNOFSDSVD               direct vis incident solar radiation on snow                                        watt/m^2    inactive
SNOFSDSVI               diffuse vis incident solar radiation on snow                                       watt/m^2    inactive
SNOLIQFL                top snow layer liquid water fraction (land)                                        fraction    inactive
SNOOCMCL                mass of OC in snow column                                                          kg/m2
SNOOCMSL                mass of OC in top snow layer                                                       Kg/m2
SNOOCFRC2L              surface forcing of OC in snow, averaged only when snow is present (land)           watt/m^2
SNOOCFRCL               surface forcing of OC in snow (land)                                               watt/m^2
SNORDSL                 top snow layer effective grain radius                                              m^-6        inactive
SNOTTOPL                snow temperature (top layer)                                                       K/m         inactive
SWup                    upwelling shortwave radiation                                                      watt/m^2    inactive
TSOI_10CM               soil temperature in top 10cm of soil                                               K
URBAN_AC                urban air conditioning flux                                                        watt/m^2
URBAN_HEAT              urban heating flux                                                                 watt/m^2
VOCFLXT                 total VOC flux into atmosphere                                                     uGC/M2/H
Wind                    atmospheric wind velocity magnitude                                                m/s         inactive
WOOD_HARVESTC           wood harvest (to product pools)                                                    gC/m^2/s
WOOD_HARVESTN           wood harvest (to product pools)                                                    gN/m^2/s

History field name changes:

Old                      New
ANNSUM_PLANT_NDEMAND     ANNSUM_POTENTIAL_GPP
ANNSUM_RETRANSN          ANNMAX_RETRANSN
C13_DWT_PROD10C_LOSS     C13_PROD10C_LOSS
C13_DWT_PROD100C_LOSS    C13_PROD100C_LOSS
C13_DWT_PROD10N_LOSS     C13_PROD10N_LOSS
C13_DWT_PROD100N_LOSS    C13_PROD100N_LOSS
DWT_PROD10N_LOSS         PROD10N_LOSS
DWT_PROD100N_LOSS        PROD100N_LOSS
DWT_PROD10C_LOSS         PROD10C_LOSS
DWT_PROD100C_LOSS        PROD100C_LOSS
HCSOISNO                 HC
TEMPSUM_PLANT_NDEMAND    TEMPSUM_POTENTIAL_GPP
TEMPSUM_RETRANSN         TEMPMAX_RETRANSN

History field names deleted include: SNOWAGE, TSNOW, FMICR, FCO2, DMI, and QFLX_SNOWCAP.

New urban oriented _U and _R (Urban and Rural) variants were added for the following history variables: EFLX_LH_TOT, FGR, FIRA, FSH, FSM, Q2M, QRUNOFF, RH2M, SoilAlpha, TG, TREFMNAV, TREFMXAV, and TSA (the _R variant for SoilAlpha is missing, as the regular SoilAlpha is only defined for rural areas anyway).

We are missing the Rural soil-alpha variable SoilAlpha_R on purpose: SoilAlpha_U is only defined over pervious road and missing everywhere else, while SoilAlpha is defined only for rural areas.

Quickstart to using &clm4;

Before working with &clm4;, read the QuickStart Guide in the &cesmrel; Scripts User's Guide. Once you are familiar with how to set up cases for any type of simulation with &cesm;, you will want to direct your attention to the specifics of using &clm;.

For some of the details of setting up cases for &clm4;, read the README and text files available from the models/lnd/clm/doc directory (see the "&clm; Web pages" section for a link to the list of these files). Here are the important ones that you should be familiar with:

README file describing the directory structure.
Quickstart.userdatasets file describing how to use your own datasets in the model (also see ).
&KnownBugs; file describing known problems in &clm4; (that we expect to eventually fix).
KnownLimitations file describing known limitations in &clm4; and workarounds that we do NOT expect to fix.

The IMPORTANT_NOTES file is covered in the next chapter, on what is functional/validated in &clm4;.

The ChangeLog/ChangeSum files are largely explained in the previous chapter on "What is new with &clm4;?".

Note that other directories have README files that explain the different components and tools used when running &clm;. They are useful in understanding how those parts of the model work and should be consulted when using tools in those directories. For more details on configuring and customizing a case with &clm; see .

The Quickstart.GUIDE (which can be found in models/lnd/clm/doc) is repeated here.
&quickstart_guide;

What is scientifically validated and functional in &clm4;?

In this section we go over what has been extensively tested and scientifically validated with &clm4;, and, maybe more importantly, what has NOT been tested and may NOT be scientifically validated. You can use all features of &clm;, but you need to realize that some things haven't been tested extensively or validated scientifically. When you use these features you may run into trouble doing so, and will need to do your own work to make sure the science is reasonable.

Standard Configuration and Namelist Options that are Validated

The standard version of the model is &clmcn; at 1-degree horizontal resolution (0.9x1.25). This version has been scientifically validated with long simulations for: fully coupled simulations ("B" cases), cases coupled to the atmosphere model CAM ("F" cases), and stand-alone &clm; cases ("I" cases). We've also done both long simulations for 1850 conditions and transient 20th century simulations from 1850 to 2005 (with transient land-use, Nitrogen and Aerosol deposition). There have also been transient future scenario simulations done for fully coupled cases for different "representative concentration pathway" (RCP) scenarios (RCP2.6, RCP4.5, RCP6.0, and RCP8.5). To a lesser extent there have also been simulations done at T31 and 2-degree horizontal resolution (1.9x2.5), and with &clmsp; for these resolutions. As such we have provided appropriate 1-degree, 2-degree, and T31 initial condition datasets for these configurations. The irrigation and prognostic crop models were both validated at 2-degree resolution: the irrigation model with &clmsp; for present day conditions in an "I" compset, and the prognostic crop model for present day conditions in a case coupled to the active atmosphere model but using a data ocean model (an "F" compset). Other resolutions, configurations, and namelist options are less well tested or scientifically validated. The further you get away from the standard configurations and resolutions, the more likely you are to run into trouble, and/or need to scientifically validate your work.

In the sections below we go through configuration and/or namelist options or modes that the user should be especially wary of using. You are of course free to use these options, and you may find that they work functionally, although in some cases you will find issues even with the functionality of using them. If so, you will need to test, debug and find solutions for these issues on your own. But in every case you will need to go through more extensive work to validate these options from a scientific standpoint.

Configure Modes NOT scientifically validated, documented, supported or, in some cases, even advised to be used:

C13 (-c13)
The C13 mode for bgc=cn is NOT scientifically validated or documented and is NOT recommended for use.

CASA (-bgc casa)
The bgc=casa mode is NOT scientifically validated or documented and is NOT recommended for use.

SNICAR_FRC (-snicar_frc)
This mode is tested and functional, but is NOT constantly scientifically validated, and should be considered experimental.

Namelist options that should NOT be exercised:

Build-Namelist options that should NOT be exercised:

-irrig with -bgc cn
We have only run the irrigation model with &clmsp; (i.e. without the CN model). We recommend that if you want to run the irrigation model with CN, you do a spinup.
But, more than that, you may need to make adjustments to irrig_factor in models/lnd/clm/src/biogeophys/CanopyFluxesMod.F90. See the notes on this in the description of the irrigation model in the Technical Descriptions of the Interactive Crop Management and Interactive Irrigation Models.

-irrig with -crop on
Irrigation doesn't work with the prognostic crop model. Irrigation is currently only applied to the generic crop, which negates its practical usage. We also have a known problem when both are on (see bug 1326 in the &KnownBugs; file). If you try to run in this mode, the &clm; &buildnml; will return with an error.

-lnd_res: Fine-mesh mode; functional, but experimental.

-rcp: Representative Concentration Pathway (RCP) for future scenarios; functional for limited resolutions, but experimental.

-datm_*: All options that start with "datm_"; they are only used for &clm; stand-alone testing.

-drv_*: All options that start with "drv_"; they are only used for &clm; stand-alone testing.

Namelist items that should NOT be exercised:

casa namelist options: lnpp, lalloc, q10, spunup, and fcpool
CASA has NOT been scientifically validated in &clm4;.

suplnitro='ALL'
The suplnitro namelist option to the CN Biogeochemistry model supplies unlimited nitrogen, and therefore vegetation is over-productive in this mode.

urban_traffic: Not currently functional.

What are the UNIX utilities required to use &clm;?

Running &clm; requires a suite of UNIX utilities and programs, and you should make sure you have all of these available before trying to go forward with using it. If you are missing one of these, you should contact the systems administrator for the machine you wish to run on and make sure they are installed.

&FORTRAN90; compiler
"C" compiler
GNU make
UNIX csh and tcsh shells
UNIX sh shell
UNIX bash shell
UNIX awk
UNIX sed
&netcdf; library
MPI Library
"C" pre-processor
&perl;
Autoconf
m4 macro processor
Parallel &netcdf; (optional)
&ncl; (for some of the offline tools for creating/modifying &clm; input datasets; see  for more information on &ncl;)
Python (optional, needed for &ptclm;)
xsltproc, docbook and docbook utilities (optional, needed to build the Users-Guide)
protex and latex2html (optional, needed to build the Code-Reference Guide)

Other resources to get help from

In addition to this users-guide there are several other resources that are available to help you use &clm4;. The first one is the &cesm; User's-Guide, which documents the entire process of creating cases with &cesm;. The next is the &cesm; bulletin board, which is a web-site for exchanging information between users of &cesm;. There are also web-pages specific to &clm;, and finally there is an email address to report bugs that you find in &cesm1;.

The &cesm; User's-Guide

&clmrel; is always run from within the standard &cesmrel; build and run scripts. Therefore, the user of &clm4; should familiarize themselves with the &cesmrel; scripts and understand how to work with them. User's-Guide documentation on the &cesmrel; scripts is available from the following web-page. The purpose of this &clmrel; User's Guide is to give the &clm4; user more complete details on how to work with &clm; and the set of tools that support &clm;, as well as to give examples that are unique to the use of &clm;.
However, the &cesmrel; Scripts User's-Guide remains the primary source for detailed information on how to build and run the &cesm; system.

&cesm1; Scripts User's-Guide

The &cesm; Bulletin Board

There is a rich and diverse set of people who use the &cesm;, and often it is useful to be in contact with others to get help in solving problems or trying something new. To facilitate this we have an online Bulletin Board for questions on the &cesm;. There are also different sections in the Bulletin Board for the different component models or for different topics.

&cesm; Online Bulletin Board

The &clm; web pages

The main &clm; web page contains information on &clm;, its history, and its developers, as well as downloads for previous model versions. There are also documentation text files in the models/lnd/clm/doc directory that give some quick information on using &clm;.

&clm; web page
&clm; Documentation Text Files

Also note that several of the XML database files can be viewed in a web browser to get a nice table of namelist options, namelist defaults, or compsets. Simply view them as a local file and bring up one of the following files:

models/lnd/clm/bld/namelist_files/namelist_definition.xml -- definition of &clm; namelist items.
models/lnd/clm/bld/namelist_files/namelist_defaults_clm.xml -- default values for &clm; namelist items.
scripts/ccsm_utils/Case.template/config_definition.xml -- definition of all env_*.xml items.
scripts/ccsm_utils/Case.template/config_compsets.xml -- definition of all the compsets.
models/lnd/clm/bld/namelist_files/history_fields.xml -- definition of &clm; history fields.

Reporting bugs in &clm4;

If you have any problems, additional questions, bug reports, or any other feedback, please send an email to cesmhelp@cgd.ucar.edu. If you find bad, wrong, or misleading information in this users guide, send an email to erik@ucar.edu. The current list of known issues for &clmrel; is in the &KnownBugs; file, and the list of issues for &cesmrel; is at &cesmwebmodelrel;/tags/cesm1_0_3/#PROBLEMS.

diff --git a/doc/UsersGuide/ptclm.xml b/doc/UsersGuide/ptclm.xml
deleted file mode 100644
index 6c0ef2aec8..0000000000
--- a/doc/UsersGuide/ptclm.xml
+++ /dev/null
@@ -1,992 +0,0 @@

How to run &ptclm;

&ptclm; (pronounced point clime) is a Python script to help you set up PoinT CLM simulations. It runs the &clm; tools for you to get datasets set up, and copies them to a location where you can use them according to the &CLMUSRDAT; naming convention. Then it runs create_newcase for you and modifies the env settings and namelist appropriately. &ptclm; has a simple ASCII text file for storing basic information for your sites. We also have complete lists for AmeriFlux and Fluxnet-Canada sites, although we only have the meteorology data for one site. For other sites you will need to obtain the meteorology data and translate it to a format that the &cesm; datm model can use. But even without meteorology data, &ptclm; is useful for setting up datasets to run with the standard &CLMQIAN; data.

The original authors of &ptclm; are: Daniel M. Ricciuto, Dali Wang, Peter E. Thornton, and Wilfred M. Post, all at the Environmental Sciences Division, Oak Ridge National Laboratory (ORNL), and R. Quinn Thomas at Cornell University. It was then modified fairly extensively by Erik Kluzek at &ncar;. We want to thank all of these individuals for this contribution to the &cesm; effort.
We also want to thank the folks at the University of Michigan Biological Station (US-UMB) who allowed us to use their Fluxnet station data and import it into our inputdata repository, especially Gil Bohrer, the PI on record for this site.

Introduction to PTCLM

To get help on &ptclm;, use the "--help" option as follows:

> cd scripts/ccsm_utils/Tools/lnd/clm/PTCLM
> ./PTCLM.py --help

The output of the above command is as follows:

&ptclm_help;

Here we give a simple example of using &ptclm; for a straightforward case of running at the US-UMB Fluxnet site on bluefire, where we already have the meteorology data on the machine. Note, see  for permission information to use this data.

Example of running &ptclm; for US-UMB on bluefire

setenv CSMDATA /fis/cgd/cseg/csm/inputdata
setenv MYCSMDATA $HOME/inputdata
setenv SITE US-UMB
setenv MYMACH bluefire
setenv MYCASE testPTCLM

# First link the standard input files to a location you have write access to
cd scripts
./link_dirtree $CSMDATA $MYCSMDATA

# Next build all of the clm tools you will need
cd ../models/lnd/clm/tools/mksurfdata
gmake
gmake clean
cd ../mkdatadomain
gmake
gmake clean
cd ../mkgriddata
gmake
gmake clean
# next run PTCLM (NOTE -- MAKE SURE python IS IN YOUR PATH)
cd ../../../../../scripts/ccsm_utils/Tools/lnd/clm/PTCLM
./PTCLM.py -m $MYMACH --case=$MYCASE --site=$SITE --csmdata=$MYCSMDATA \
    --aerdepgrid --ndepgrid
# NOTE: we use --aerdepgrid --ndepgrid so that you use the global
# aerosol and Nitrogen deposition files rather than site-specific ones.
cd ../../../../../$MYCASE
# Finally configure, build, and run the case as normal

Guide to the options of &ptclm;

There are three types of options to &ptclm;: required, configure/run-time, and dataset generation options. The three required options are the three settings that MUST be specified for &ptclm; to work at all. The other settings have default values that will default to something useful. The configure/run-time options control how the simulation will be set up and run. The dataset generation options control the generation of datasets needed when &ptclm; is run. Most options use a double dash "--" "longname" such as "--list", but the most common options also have a short-name with a single dash (such as -m instead of --machine).

The required options to &ptclm; are: inputdata directory (-d), machine (-m), and site-name (-s). The inputdata directory is the directory where you have the &cesm; inputdata files; you need to have write access to this directory, so if you are running on a machine where you do NOT have write access to the standard inputdata location (such as &ncar; bluefire or ORNL jaguar), you need to link the standard files to a location you do have control over. We recommend using the scripts/link_dirtree tool to do that. "machine" is the scripts name for the machine/compiler you will be using for your case. And finally, site-name is the name of the site that you want to run for. Site-name can either be a valid &CLM1PT; supported dataset name or a Fluxnet site name from the list of sites you are running on (see the --sitegroupname option for more information about the site lists).

After &ptclm; is run, a case directory is created where you can then configure, build and run your &cesm; case as normal. It also creates a README.PTCLM in that directory that documents the command-line options to &ptclm; that were used to create it.
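Stripped down to just those required options, a minimal invocation looks like the sketch below (reusing the bluefire machine, US-UMB site, and writable $MYCSMDATA inputdata directory from the example above; all other options take their defaults):

> cd scripts/ccsm_utils/Tools/lnd/clm/PTCLM
# -d = inputdata directory (must be writable), -m = machine, -s = site-name;
# everything else falls back to its default value.
> ./PTCLM.py -d $MYCSMDATA -m bluefire -s US-UMB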
After "help", the "list" option is one of the most useful options for getting help on using &ptclm;. This option gives you information about some of the other options to &ptclm;. To get a list of the machines, sites, and compsets that can be used for &ptclm;, use the "--list" option as follows:

> cd scripts/ccsm_utils/Tools/lnd/clm/PTCLM
> ./PTCLM.py --list

The output of the above command is as follows:

&ptclm_list;

Overview on using &ptclm;

Steps in running &ptclm;:

1. Setup Inputdata directory with write access (use link_dirtree script)

You need to set up an inputdata directory where you have write access to it. Normally, for &ncar; machines the data is in an inputdata location where the user does NOT have write access. A way that you can get around this is to use the link_dirtree script to create softlinks from the normal location to a location you have write access to. So, for example, on bluefire:

> setenv CSMDATA /fs/cgd/csm/inputdata
> setenv MYCSMDATA $HOME/inputdata
> mkdir $MYCSMDATA
> cd scripts
> ./link_dirtree $CSMDATA $MYCSMDATA

See  for more information on this.

2. Build the &clm; tools

Next you need to make sure all the &clm; &FORTRAN; tools are built.

> cd models/lnd/clm/tools/mkgriddata
> gmake
> gmake clean
> cd ../mkdatadomain
> gmake
> gmake clean
> cd ../mksurfdata
> gmake
> gmake clean

3. Run &ptclm;

Next you actually run &ptclm;, which does the different things listed below:

&ptclm; names your case based on your input

&ptclm; names your case based on the input you give to it:

[Prefix_]SiteCode_Compset[_QIAN][_spinuptype]
Where:
  Prefix is from the caseidprefix option (or blank if not used).
  SiteCode is the site name you entered with the -s option.
  Compset is the compset name you entered with the -c option.
  _QIAN is part of the name only if the useQIAN option is used.
  _spinuptype is part of the name if one of: ad_spinup, exit_spinup, or
    final_spinup is used, and the exact spinup name chosen is used.

For example, the casename for the following will be "US-UMB_I_2000_CN_QIAN_ad_spinup":

> cd scripts
> ./PTCLM.py -m bluefire -s US-UMB -d $MYCSMDATA -c I_2000_CN --ad_spinup --useQIAN

&ptclm; creates datasets for you

It will populate $MYCSMDATA with new datasets it creates using the &clm; tools.

If a transient compset is used and &ptclm; finds a _dynpftdata.txt file

If you are running a transient compset (such as the "I_1850-2000_CN" compset) AND there is a file in the PTCLM_sitedata directory under the &ptclm; directory called $SITE_dynpftdata.txt, it will use this file for the land-use changes. Otherwise it will leave land-use constant, unless you use the pftgrid option so it uses the global dataset for land-use changes. See  for more information on this. There is a sample transient dataset called US-Ha1_dynpftdata.txt. Transient compsets are compsets that create transient land-use change and forcing conditions, such as: 'I_1850-2000', 'I_1850-2000_CN', 'I_RCP8.5_CN', 'I_RCP6.0_CN', 'I_RCP4.5_CN', or 'I_RCP2.6_CN'.

&ptclm; creates a pft-physiology file for you

&ptclm; will create a local copy of the pft-physiology file specific for your site that you could then customize with changes specific for that site.
&ptclm; creates a README.PTCLM for you

&ptclm; will create a simple text file with the command line for it, in a file called README.PTCLM, in the case directory it creates for you.

4. Customize, configure, build and run the case as normal

You then customize your case as you would normally. See the  chapter for more information on doing this.

Details on the options of &ptclm;

Next we discuss the configure and run-time options, dividing them up into configure, spinup, and run-time options.

Configure options include:

-c MYCOMPSET, --compset=MYCOMPSET
--caseidprefix=MYCASEID
--cesm_root=BASE_CESM
--namelist=NAMELIST
--rmold
--scratchroot=SCRATCHROOT
--sitegroupname=SITEGROUP
--QIAN_tower_yrs
--useQIAN

--compset
The "-c" option is the most commonly used option after the required options, as it specifies the &cesm; scripts component set to use with &ptclm;. The default compset is the "ICN" compset, with CN on for present day conditions.

--caseidprefix
This option gives a prefix to include in the casename when the case is created, in case you want to customize your casenames a bit. By default, casenames are figured out based on the other options. The argument to this option can either be a name to prefix casenames with and/or a pathname to include. Hence, if you want cases to appear in a specific directory, you can give the pathname to that directory with this option.

--cesm_root
This option is for running &ptclm; with a different root directory to &cesm; than the version &ptclm; exists in. Normally you do NOT need to use this option.

--namelist
This option adds any items given into the &clm; &usernlclm; namelist. This allows you to add customizations to the namelist before the clm.buildnml.csh file is created for the case.

--rmold
This option will remove an old case directory of the same name if one exists. Otherwise, if an old case directory already exists and you try to run &ptclm;, it will return with an error.

--scratchroot
This option is ONLY valid when using one of the generic machines (the -m option). It is passed on to create_newcase and gives the location where cases will be built and run.

--sitegroupname
In the &ptclm; directory there is a subdirectory "PTCLM_sitedata" that contains files with the site, PFT, and soil data information for groups of sites. These site groups are all separate ASCII files with the same prefix followed by a "_*data.txt" name. See  for more information on these files. By default we have provided three different valid group names:

EXAMPLE
AmeriFlux
Fluxnet-Canada

The EXAMPLE is the group used by default and ONLY includes the US-UMB site, as that is the only site we have data provided for. The other two site groups include the site information for all of both the AmeriFlux and Fluxnet-Canada sites. You can use the "sitegroupname" option to use one of the other lists, or you can create your own lists using the EXAMPLE file as an example. Your list of sites could be real world locations or could be theoretical "virtual" sites given to exercise &clm; on differing biomes, for example. Note, see  for permission information to use the US-UMB data.

--useQIAN
This option says to use the standard &clm; global Qian T62 atmospheric forcing rather than any tower site forcing data available.
Otherwise, &ptclm; will try to find tower
forcing data for the specific site entered.

--QIAN_tower_yrs

This option is used with the "useQIAN" option to set the years to cycle over for
the Qian data. In this case Qian atmospheric forcing will be used, but the
simulation will run over the same years that tower data is available for this site.

Spinup options include:

--coldstart
--ad_spinup
--exit_spinup
--final_spinup
--finidat=FINIDAT

The spinup options enable the different CN spinup modes, but also set the run
length. The coldstart option says to start up WITHOUT an initial condition file, while
the finidat option explicitly gives the initial condition file to use. Obviously,
the different spinup options can NOT be used together, nor can the coldstart and
finidat options be combined.

--coldstart

This option ensures that a cold-start will be done with arbitrary initial conditions.

--ad_spinup

This option enables the accelerated decomposition mode when a CN compset is used. It
also sets the run length as given in the example for running an AD spinup in
.

--exit_spinup

This option enables the exit spinup mode when a CN compset is used. It also sets the
run length to a year just as given in the example for running exit spinup in
.

--final_spinup

This option sets the run length as given in the example for a final spinup in
. This option can be used for any compset.

There is a bug in the final_spinup mode for setting the run length. Because of the
bug, final_spinup mode only runs for a very short time, so you'll need to edit
the run length by hand to be 50 years. See bug 1367 in the &KnownBugs; file.

--finidat

This option sets the initial condition file to start up the simulation from.

Run-time options include:

--debug
--run_n=MYRUN_N
--run_units=MYRUN_UNITS
--stdurbpt

--debug

This option tells &ptclm; to echo what it would do if it were run, but NOT actually
run anything. So it will show you the dataset creation commands it would use.
It does, however, run create_newcase, but then it only displays
the xmlchange commands and changes that it would do. Also note
that if you give the "--rmold" option it won't delete the case directory beforehand.
Primarily this is intended for debugging the operation of &ptclm;.

--run_n

This option along with run_units is used to set the length of the simulation. "run_n"
is the number of units to use.
The default run length depends on the site, compset,
and configuration -- for example, on whether a "spinup" option is selected.

--run_units

This option is the units of time to use for the length of the simulation. It is used
along with "run_n" to set the length of the simulation.
The default run length depends on the site, compset,
and configuration -- for example, on whether a "spinup" option is selected.

--stdurbpt

This option turns on the "stdurbpt_pd" use-case for &CLMUSECASE;. This option
can NOT be used for compsets that set the use-case to something besides present-day.

Lastly we discuss the dataset generation options. The dataset generation options are:

--aerdepgrid
--ndepgrid
--pftgrid
--soilgrid
--nopointdata
--owritesrfaer

The options with a "grid" suffix all mean to create datasets using the global
gridded information rather than using the site-specific point data. By default the
site-specific point data is used.
The "nopointdata" and "owritesrfaer" options have to -do with file creation. - - -Because supported single-point datasets already have the data created for them, you -MUST use the "nopointdata" and "ndepgrid" options when you are using a supported -single-point site. You must use "ndepgrid" even for a compset without CN. You also -can NOT use the options: "soilgrid", "pftgrid", "aerdepgrid", or "owritesrfaer". - - - - - ---aerdepgrid - -This option says to use the aerosol deposition files from the global dataset rather -than creating an interpolated version. - - -This option must NOT be used when you you are using a site that -is a supported single point dataset. - - - - - ---ndepgrid - -This option says to use the Nitrogen deposition files from the global dataset rather -than creating an interpolated version. This is only needed for compsets with CN. - - - -This option is required when you you are using a site that -is a supported single point dataset. This is true even when you are NOT using a -compset with CN. - - - - - - ---pftgrid - -This option says to use the PFT values provided on the global dataset rather than -using the specific site based values from the -PTCLM_sitedata/*_pftdata.txt file when creating the surface dataset. - - -This option must NOT be used when you you are using a site that -is a supported single point dataset. - - - - - ---soilgrid - -This option says to use the soil values provided on the global dataset rather than -using the specific site based values from the -PTCLM_sitedata/*_soildata.txt file when creating the surface dataset. - - -This option must NOT be used when you you are using a site that -is a supported single point dataset. - - - - - ---nopointdata - -This option says to NOT create any input datasets -- assume this step has already been -done. If datasets weren't already created, your case will fail when you try to run it. -In general the first time you run &ptclm; for a new site you want it to generate new -datasets, but the next time and future times you want to use this option so that it -doesn't waste a lot of time rebuilding datasets over again. - - - -This option is required when you you are using a site that -is a supported single point dataset. - - - - - - ---owritesrfaer - -This option says to overwrite any surface and/or aerosol deposition datasets that -were already created. Otherwise, the creation of these files will be skipped if a file -is already found (but it WILL create files if they don't exist). - - -This option must NOT be used when you you are using a site that -is a supported single point dataset. - - - - - - - - -Note on the aerosol and Nitrogen deposition files. When the "aerdepgrid" and "ndepgrid" -options are NOT used -- aerosol and Nitrogen deposition files will be created by -interpolating from the global datasets. However, after these interpolated files -are created you could customize them for your site with data that you provide. You -could then write protect the files and use the "nopointdata" option so that &ptclm; -doesn't try to overwrite them in the future. - - - - - - -Examples using &ptclm; - -Now let's give a few more complex examples using some of the options we have -discussed above. - - -In this first example, we'll demonstrate using a supported single point dataset, -which then requires using the "nopointdata" and "ndepgrid" options. We'll also -demonstrate the compset option, "stdurbpt" and "caseidprefix" options. 
- -Example of running &ptclm; for the Mexicocity supported single point -dataset - -> cd scripts/ccsm_utils/Tools/lnd/clm/PTCLM -> ./PTCLM.py -m bluefire -s 1x1_mexicocityMEX -d $CSMDATA --nopointdata --ndepgrid \ ---stdurbpt -c I --caseidprefix `pwd`/myPTCLMcases/site -> cd myPTCLMcases/site_1x1_mexicocityMEX_I -> ./configure -case -# Now build and run normally -> ./site_1x1_mexicocityMEX_I.bluefire.build -# Here we show running interactively -> ./site_1x1_mexicocityMEX_I.bluefire.run - - - - - -Now, let's demonstrate using a different group list, doing a spinup, running with Qian -global forcing data, but using tower years to set the years to run over. This uses -the options: sitegroupname, ad_spinup, useQIAN, and QIANtower_years. - -Example of running &ptclm; for a spinup simulation with Qian data for tower years. - - -> cd scripts/ccsm_utils/Tools/lnd/clm/PTCLM -> ./PTCLM.py -m bluefire -s US-Ha1 -d $CSMDATA --sitegroupname AmeriFlux \ ---ad_spinup --useQIAN --QIAN_tower_yrs -> cd ../../../../../US-Ha1_ICN_QIAN_ad_spinup -> ./configure -case -# Now build and run normally -> ./US-Ha1_ICN_QIAN_ad_spinup.bluefire.build -# Here we show running interactively -> ./US-Ha1_ICN_QIAN_ad_spinup.bluefire.run - - - - - -Finally, let's demonstrate using a generic machine (which then requires the scratchroot -option), using the global grid for PFT and soil types, and setting the run length -to two months. - -Example of running &ptclm; on a generic machine with global PFT and soil types -dataset - -> cd scripts/ccsm_utils/Tools/lnd/clm/PTCLM -# Note, see the with permission information -# to use the US-UMB data. -> ./PTCLM.py -m generic_darwin_intel -s US-UMB -d $CSMDATA --pftgrid --soilgrid \ ---scratchroot $HOME --run_n 2 --run_units nmonths -> cd ../../../../../US-UMB_ICN -> ./configure -case -# Now build -> ./US-UMB_ICN.generic_darwin_intel.build -# To get the files from the svn server... -# First list the files from the streams text file -> ../ccsm_utils/Tools/listfilesin_streams \ --t $HOME/US-UMB_ICN/run/clm1PT.1x1pt_US-UMB.stream.txt -l \ -> Buildconf/datm.input_data_list -# And now run the script to export data to your machine -> ../ccsm_utils/Tools/check_input_data -export -# Here we show running interactively -> ./US-UMB_ICN.generic_darwin_intel.run - - - - -Because of Bug 1364, when running this case as above we get a floating point -error after reaching time-step 124 for the example exactly as above. Other -machines or compilers probably won't have this problem. See the &KnownBugs; file -for more information on this problem. - - - - -As documented in Bug 1368, spinning up the US-UMB site for a I2000CN compset gives -zero Gross Primary Production (GPP). If the user wishes to use this site for &clmcn;, -they'll need to address this issue. -See the &KnownBugs; file for more information on this problem. - - - - - - -Adding data for use by &ptclm; - -&ptclm; Group Site Lists - -The "sitegroupname" option to &ptclm; looks for groups of sites in the -files in the PTCLM_sitedata directory under the &ptclm; directory. -You can add new names available for this option including your own lists of sites, by -adding more files in this directory. There are three files for each "sitegroupname": -$SITEGROUP_sitedata.txt, -$SITEGROUP_soildata.txt -and $SITEGROUP_pftdata.txt (where $SITEGROUP is the name that would -be entered as "sitegroupname" to &ptclm;). Each file needs to have the same list of sites, -but gives different information: site data, PFT data, and soil data respectively. 
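To create your own group, you can start from the provided EXAMPLE files and edit the
copies (a minimal sketch; the group name "MYSITES" here is purely illustrative):

> cd scripts/ccsm_utils/Tools/lnd/clm/PTCLM/PTCLM_sitedata
> cp EXAMPLE_sitedata.txt MYSITES_sitedata.txt
> cp EXAMPLE_soildata.txt MYSITES_soildata.txt
> cp EXAMPLE_pftdata.txt  MYSITES_pftdata.txt
# Edit the three MYSITES_*.txt files to list your sites (keeping the one-line
# header described below), then give "--sitegroupname MYSITES" to PTCLM.py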
Although the site codes need to be the same between the three files, the files do NOT
have to be in the same order. Each file has a one-line header that lists the contents
of each column, which are separated by commas. The first column for each of the files
is the "site_code", which must be consistent between the three files. The site code
can be any unique character string, but in general we use the AmeriFlux site code.

Site data file ($SITEGROUP_sitedata.txt): The header for
this file is:

site_code,name,state,lon,lat,elev,startyear,endyear,alignyear

The columns: name, state, and elevation are informational only. Name is a longer
descriptive name of the site, and state is the state for U.S. sites or country
for non-U.S. sites. The columns: lon and lat are the longitude and latitude of
the location in decimal degrees. The last three columns are the start and ending
year for the data and the align year for an 1850 case for the data. The align year
is currently unused.

Soil data file ($SITEGROUP_soildata.txt): The header for this
file is:

site_code,soil_depth,n_layers,layer_depth,layer_sand%,layer_clay%

The first three fields after "site_code" are currently unused. The only two that
are used are the percent sand and clay columns to set the soil texture.

PFT data file ($SITEGROUP_pftdata.txt): The header for this
file is:

site_code,pft_f1,pft_c1,pft_f2,pft_c2,pft_f3,pft_c3,pft_f4,pft_c4,pft_f5,pft_c5

This file gives the vegetation coverage for the different vegetation types for the site.
The file only supports up to five PFTs at the same time. The columns with "pft_f" are
the fractions for each PFT, and the columns with "pft_c" are the integer index of the
given PFT. Look at the pft-physiology file to see what the PFT index for each PFT type
is.

Dynamic Land-Use Change Files for use by &ptclm;

There is a mechanism for giving site-specific land-use change in &ptclm;. Adding
site-specific files to the PTCLM_sitedata directory under
&ptclm; allows you to specify the change in vegetation and change in harvesting
(for the CN model) for that site. Files are named:
$SITE_dynpftdata.txt. There is a sample file for the US-Ha1
site called: US-Ha1_dynpftdata.txt. The file has a one-line
header with the information that the file has, and then one line for each year
with a transition. The header line is as follows:

trans_year,pft_f1,pft_c1,pft_f2,pft_c2,pft_f3,pft_c3,pft_f4,pft_c4,pft_f5,pft_c5,har_vh1,har_vh2,har_sh1,har_sh2,har_sh3,graze,hold_harv,hold_graze

This file only requires a line for each year where a transition or harvest happens. As
in the "pftdata" file above, "pft_f" refers to the fraction and "pft_c" refers to the
PFT index, and only up to five vegetation types are allowed to co-exist. The last
eight columns have to do with harvesting and grazing. The last two columns are whether
to hold harvesting and/or grazing constant until the next transition year, and will
just be either 1 or 0. This file will be converted by the
PTCLM_sitedata/cnvrt_trnsyrs2_landuse_timeseries_txtfile.pl script in the &ptclm;
directory to a format that mksurfdata can read, which has an entry
for each year for the range of years valid for the compset in question.

Converting AmeriFlux Data for use by &ptclm;

AmeriFlux data comes in comma-separated format and is available from:
http://public.ornl.gov/ameriflux/dataproducts.shtml. Before you
download the data you need to agree to the usage terms.
Here is a copy of the usage terms from the web-site on June/13/2011.

"The AmeriFlux data provided on this site are freely available and were furnished by
individual AmeriFlux scientists who encourage their use. Please kindly inform the
appropriate AmeriFlux scientist(s) of how you are using the data and of any publication
plans. Please acknowledge the data source as a citation or in the acknowledgments if the
data are not yet published. If the AmeriFlux Principal Investigators (PIs) feel that they
should be acknowledged or offered participation as authors, they will let you know and we
assume that an agreement on such matters will be reached before publishing and/or use of
the data for publication. If your work directly competes with the PI's analysis they may
ask that they have the opportunity to submit a manuscript before you submit one that uses
unpublished data. In addition, when publishing, please acknowledge the agency that
supported the research. Lastly, we kindly request that those publishing papers using
AmeriFlux data provide preprints to the PIs providing the data and to the data archive at
the Carbon Dioxide Information Analysis Center (CDIAC)."

The above agreement applies to the "US-UMB" dataset imported into our repository as
well, and Gil Bohrer is the PI on record for that dataset.

The &cesm; can NOT handle missing data, so we recommend using the "Level 4" Gap filled
datasets.
The fields will also need to be renamed. The "WS" column becomes "WIND", "PREC" becomes
"PRECTmms", "RH" stays as "RH", "TA" becomes "TBOT", "Rg" becomes "FSDS", "Rgl" becomes
"FLDS", "PRESS" becomes "PSRF". "ZBOT" can just be set to the constant of "30" (m).
The units of Temperature need to be converted from "Celsius" to "Kelvin" (use the
value of SHR_CONST_TKFRZ, 273.15, in the file
models/csm_share/shr/shr_const.F90).
The units of Pressure also need to be converted from "kPa" to "Pa". LATIXY and
LONGXY should also be set to the latitude and longitude of the site.

&ptclm; transient example over a shorter time period

Example of running &ptclm; for transient land-use 1991-2006 for US-Ha1 on bluefire

This is an example of using &ptclm; for Harvard Forest (AmeriFlux site code US-Ha1). In
order to do this we need to have converted the AmeriFlux data into &netcdf;
format as shown in the section above. Also note
that this site has a site-specific dynamic land-use change file,
PTCLM_sitedata/US-Ha1_dynpftdata.txt in the &ptclm; directory,
and this file will be used for land-use change and harvesting rather than the
global dataset.
> cd scripts/ccsm_utils/Tools/lnd/clm/PTCLM
# We are going to use forcing data over 1991 to 2006, but we need to start with
# a transient compset to do so, so we use the 20th Century transient: 1850-2000
# Note: When creating the flanduse_timeseries dataset for this site it will use the
# PTCLM_sitedata/US-Ha1_dynpftdata.txt
# file for land-use change and harvesting
> ./PTCLM.py -m bluefire -s US-Ha1 -d $MYCSMDATA --sitegroupname AmeriFlux \
-c I_1850-2000_CN
> mkdir $MYCSMDATA/atm/datm7/CLM1PT_data/1x1pt_US-Ha1
> cd $MYCSMDATA/atm/datm7/CLM1PT_data/1x1pt_US-Ha1
# Copy data in &netcdf; format to this directory, filenames should be YYYY-MM.nc
# The fieldnames on the file should be:
# FLDS, FSDS, LATIXY, LONGXY, PRECTmms, PSRF, RH, TBOT, WIND, ZBOT
# With units
# W/m2, W/m2, degrees_N, degrees_E, mm/s, Pa, %, K, m/s, m
# The time coordinate units should be: days since YYYY-MM-DD 00:00:00
> cd ../../../../../US-Ha1_I_1850-2000_CN
# We need to turn cold-start on, so it doesn't expect an initial condition file
# (preferably, you would generate your own initial condition file and then use
# the --finidat option to &ptclm; to enter it)
> ./xmlchange -file env_conf.xml -id CLM_FORCE_COLDSTART -val on
# Now we need to set the start date to 1991, and have it cycle forcing data
# over 1991 to 2006
> ./xmlchange -file env_conf.xml -id RUN_STARTDATE -val 1991-01-01
> ./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_ALIGN -val 1991
> ./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_END -val 2006
# Similarly for Nitrogen deposition data we cycle over: 1991 to 2006
> ./xmlchange -file env_conf.xml -id CLM_NAMELIST_OPTS -val \
"model_year_align_ndep=1991,stream_year_first_ndep=1991,stream_year_last_ndep=2006"
# Now configure the case, and we'll edit the datm namelist for prescribed aerosols
> ./configure -case
# We also need to change the datm to run with aerosols over the 1991-2006 period
cat << EOF > patch.diff
*** datm.buildnml.csh.orig      2011-06-14 09:28:20.000000000 -0600
--- datm.buildnml.csh   2011-06-14 09:28:57.000000000 -0600
***************
*** 32,38 ****
   dataMode       = 'CLMNCEP'
   domainFile     = '$DOMAINFILE'
   streams        = 'clm1PT.1x1pt_US-Ha1.stream.txt 1991 1991 2006 ',
!                   'presaero.stream.txt 1849 1849 2006'
   vectors        = 'null','null'
   mapmask        = 'nomask','nomask'
   mapalgo        = 'nn','nn'
--- 32,38 ----
   dataMode       = 'CLMNCEP'
   domainFile     = '$DOMAINFILE'
   streams        = 'clm1PT.1x1pt_US-Ha1.stream.txt 1991 1991 2006 ',
!                   'presaero.stream.txt 1991 1991 2006'
   vectors        = 'null','null'
   mapmask        = 'nomask','nomask'
   mapalgo        = 'nn','nn'
EOF
# Apply the above patch to the datm build namelist file
> patch Buildconf/datm.buildnml.csh patch.diff

Because of bug 1361, this won't work out of the box. You'll need to add the change
to PTCLM.py given in the KnownBugs file on this issue.

A bit about the structure of &ptclm;, what it does, and how it works

A large part of &ptclm; just sets up the different options and does error checking
on the options given. &ptclm; then uses the options provided to run
create_newcase to create a new case. It then queries both the
case directory and/or the XML database (using
queryDefaultNamelist.pl in models/lnd/clm/bld)
and does other settings for the case. It then runs the different &clm; tools in turn to
create the necessary datasets and points to them in the case with the &CLMUSRDAT; option.
It runs mkgriddata, mksurfdata.pl, and
mkdatadomain as well as the aerdepregrid.ncl and
ndepregrid.ncl &ncl; scripts.
mkgriddata and
mkdatadomain have template namelist files in the
scripts/ccsm_utils/Tools/lnd/clm/PTCLM/usr_files directory.
When running mksurfdata.pl, if it finds a
$SITE_dynpftdata.txt file in the
scripts/ccsm_utils/Tools/lnd/clm/PTCLM/PTCLM_sitedata directory,
it will use that file for transient land-use changes (there's a sample file for
"US-Ha1" called US-Ha1_dynpftdata.txt).
It modifies the different env*.xml files using
xmlchange and creates an initial &usernlclm; file. After
&ptclm; is run you can then make changes to the case by hand, and configure, build
and run as normal.

There is a simple test script to test &ptclm;. See
for more information on using it.

diff --git a/doc/UsersGuide/single_point.xml b/doc/UsersGuide/single_point.xml
deleted file mode 100644
index b07f76ecb6..0000000000
--- a/doc/UsersGuide/single_point.xml
+++ /dev/null
@@ -1,930 +0,0 @@

How to run Single-Point/Regional cases

The &clm; also allows you to set up and run cases with a single point or a local region as well
as global resolutions. This is often useful for running quick cases for testing, evaluating
specific vegetation types or land-units, or running with observed data for a specific site.
There are four different ways to do this: &PTSMODE;,
&CLM1PT;, &CLMUSRDAT;, and with &ptclm;.

&PTSMODE; -- to run for a single point using global datasets.
&CLM1PT; -- to run for a supported single-point or regional dataset.
&CLMUSRDAT; -- to run using your own datasets (single-point or regional).
&ptclm; -- to easily set up simulations to run for tower sites.

&PTSMODE; and &ptclm; only work for a single point, while the other two options can
work for regional datasets as well.

Which Single Point Option Should I choose?

In general, &PTSMODE; is the quick and dirty method
that gets you started without having to create datasets -- but it has limitations. It's
good for an initial attempt at seeing results for a point of interest, but since you
can NOT restart with it, its usage is limited. It is the quickest method as you can
create a case for it directly from create_newcase. Although you
can't restart, running a single point is very fast, and you can run for long
simulation times even without restarts. If you need restarts, a good solution is to use
getregional_datasets.pl and &CLMUSRDAT;,
which can get you running almost as quickly as
&PTSMODE;. Like
&PTSMODE;, it only runs for points that exist within
a global dataset.

Running &CLM1PT; is a great solution if one of the supported
single-point/regional datasets is your region of interest (see
). All the datasets are
created for you, and you can easily select one and run, pretty much, out of the box
with it. The problem is that there is a very limited set of supported datasets. You
can also use this method for your own datasets, but you have to create the datasets,
and add them to the XML database and to the &datm;. This is worthwhile if you want to
repeat multiple cases for a given point or region.

Next, &CLMUSRDAT; is the best way to set up cases quickly
where you have to create your own datasets (see
). With this method you don't have to
change &datm; or add files to the XML database -- but you have to follow a strict
naming convention for files. However, once the files are named and in the proper
location, you can easily set up new cases that use these datasets. This is good
for treating all the required datasets as a "group" and for a particular
model version.
For advanced &clm; developers who need to track dataset changes with
different model versions, you would be best off adding these datasets as supported
datasets with the &CLM1PT; method.

Lastly, &ptclm; is a great way to easily create datasets,
set up simulations, and run simulations for tower sites. It takes advantage of both
&CLM1PT; and &CLMUSRDAT; internally. A big advantage to it is that it's one-stop
shopping: it runs tools to create datasets, runs create_newcase,
and sets the appropriate env variables for you. So you only have to learn how to run
one tool, rather than work with many different ones. &ptclm; is described in the next
chapter .

Finally, if you also have meteorology data that you want to force your &clm; simulations
with, you'll need to set up cases as described in .
You'll need to create &clm; datasets either according to &CLM1PT;
or &CLMUSRDAT;, but you'll also need to modify &datm; to use
your forcing data. And you'll need to change your forcing data to be in a format that
&datm; can use. In the &ptclm; chapter the
section tells you how to use AmeriFlux data for atmospheric forcing.

Running &PTSMODE; configurations

&PTSMODE; enables you to run the model using global datasets, but just picking a
single point from those datasets and operating on it. It can be a very quick way to do fast
simulations and get a quick turnaround.

To set up a &PTSMODE; simulation you use the "-pts_lat" and "-pts_lon"
arguments to create_newcase to give the latitude and longitude of the point you want to
simulate for (the code will pick the point on the global grid nearest to the point you
give). Here's an example to set up a simulation for the nearest point at 2-degree resolution
to Boulder, Colorado.

> cd scripts
> ./create_newcase -case testPTS_MODE -res f19_g16 -compset I -mach bluefire \
-pts_lat 40.0 -pts_lon -105
> cd testPTS_MODE
# We make sure the model will start up cold rather than using initial conditions
> ./xmlchange -file env_conf.xml -id &CLMFORCECOLD; -val on
> ./xmlchange -file env_conf.xml -id RUN_TYPE -val startup

Then configure, build and run as normal. We make sure initial conditions are NOT used
since &PTSMODE; currently CAN NOT run with initial conditions.

By default it sets up to run with
USE_MPISERIAL (in the env_build.xml file) turned on,
which allows you to run the model interactively. On some machines this mode is NOT
supported and you may need to change it to FALSE before you are able to build.

&PTSMODE; currently does NOT restart nor
is it able to start up from global initial condition files. See bugs "1017 and 1025"
in the KnownLimitations file.

You can change the point you are simulating for at run-time by changing the values of
PTS_LAT and PTS_LON in the env_run.xml file.

Note that when running with &PTSMODE; the number of processors
is automatically set to one. When running a single grid point you can only use a single
processor. You might also want to set the "env_conf" variable: USE_MPISERIAL to
TRUE so that you can also run interactively without having to use
&mpi; to start up your job.

Warning about Running with a Single-Processor on a Batch Machine

This problem always comes up when running for a single point, because you can only use
a single processor, but it may come up in other instances when you are running with
one processor. This applies to all the different ways of running in single-point mode.
A warning for submitting single-point simulations to the batch queue when only using
one processor. On many machines this will mean using up at least an entire node, and
being charged for all the CPUs on that node even if you aren't using them. For example,
on the &ncar; machine bluefire, there are 32 processors for each node
and the batch scripts are set up to have exclusive use of that node (and hence be charged
for all 32 processors). There are similar issues on other machines; below we show you
what to do when running on bluefire.

To change this on bluefire -- change the following:

#BSUB -q regular
#BSUB -N
#BSUB -x

to...

#BSUB -q share
#BSUB -N

so remove the "#BSUB -x" which gives you the entire node exclusively, and change to the
share queue. On other machines you may have to do something similar, but the particulars
depend on the given machine, hence you will need to consult with the system
administrators for the given machine you are running on.

Another similar problem on many machines is that some batch queues have a minimum number
of nodes or processors that can be used. On these machines you may have to change the
queue (in some way similar to the above for bluefire) and possibly the time limits of
the job, to get it to run in the batch queue.

Another way to get around this problem is to run the job interactively using
USE_MPISERIAL so that you don't submit the job to the batch queue.
For single-point mode you also may want to consider using a smaller workstation or
cluster, rather than a supercomputer, because you can't take advantage of the
multi-processing power of the supercomputer anyway.

Running Supported Single-point/Regional Datasets

In addition to &PTSMODE; the &clm; supports running using single-point or
regional datasets that are customized to a particular region. In the section below we
tell the user how to create their own dataset, but we also support a small number of
single-point and regional datasets that are ready to set up and run in the CESM modeling
system.

To get the list of supported dataset resolutions see the method given in the
section on use of &CLM1PT;, which results in the following:

&res_list;

The resolution names that have an underscore in them ("_") are all single-point or
regional resolutions.
To run with the supported single-point and regional datasets, you set up a simulation for the
"pt1_pt1" resolution and give the short-name for the file to use in the
env_conf.xml file.

To run for the Brazil test site
do the following:

Example of running &clm; over a single-point test site in Brazil
with the default Qian atmosphere data forcing.

> cd scripts
> ./create_newcase -case testSPDATASET -res pt1_pt1 -compset I \
-mach bluefire
> cd testSPDATASET
# Configure to run for the test site
> set SITE=1x1_brazil
> ./xmlchange -file env_conf.xml -id &CLMCONFIG; -val "-sitespf_pt $SITE"
> ./xmlchange -file env_conf.xml -id &CLM1PT; -val $SITE

Then configure, build and run normally.

Then, to run for the urban Mexico City, Mexico test site that also has its own atmosphere
forcing data -- but to run it with the Qian forcing data over the period for
which the site's own forcing data is provided -- do the following:

Example of running &clm; over the single-point of Mexicocity Mexico
with the default Qian atmosphere data forcing.
- - -> cd scripts -> ./create_newcase -case testSPDATASET -res pt1_pt1 -compset I \ --mach bluefire -> cd testSPDATASET -# Set a variable to the site you want to use (as it's used several times below) -> set SITE=1x1_mexicocityMEX -# Configure to run for the urban test site -> ./xmlchange -file env_conf.xml -id &CLMCONFIG; -val "-sitespf_pt $SITE" -> ./xmlchange -file env_conf.xml -id &CLM1PT; -val $SITE -# Set &datm; prescribed aerosols to single-point dataset -# Will then use the dataset with just the point for this $SITE -> ./xmlchange -file env_conf.xml -id DATM_PRESAERO -val pt1_pt1 -# -# Set some of the settings that are particular to this site, by values contained -# in the XML database. For some sites, or for new sites this information won't be -# stored. And the queryDefaultNamelist.pl command will abort. -# -# Set &datm; start and end range (optional just to run over the same years that -# atmospheric forcing data is available for this site) -> ./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_START -val \ -`../../models/lnd/clm/bld/queryDefaultNamelist.pl -res $SITE \ --namelist default_settings -silent -var datm_cycle_beg_year -justvalue` -> ./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_END -val \ -`../../models/lnd/clm/bld/queryDefaultNamelist.pl -res $SITE \ --namelist default_settings -silent -var datm_cycle_end_year -justvalue` - - - - -Then configure, build and run normally. - - - -Just like &PTSMODE; above, By default it sets up to run with -USE_MPISERIAL (in the env_build.xml file) turned on, -which allows you to run the model interactively. On some machines this mode is NOT -supported and you may need to change it to FALSE before you are able to build. - - - - -See for a warning about running single-point jobs -on batch machines. - - - - -Note, that when running a pt1_pt1 resolution the number of processors -is automatically set to one. When running a single grid point you can only use a single -processor. You might also want to set the "env_conf" variable: USE_MPISERIAL to -TRUE so that you can also run interactively without having to use -mpi to start up your job. - - - -Running Supported Single-point Datasets that have their own Atmospheric Forcing - -Of the supported single-point datasets we have three that also have atmospheric forcing data -that go with them: Mexico City (Mexico), Vancouver, (Canada, British Columbia), and -urbanc_alpha (test data for an Urban inter-comparison project). Mexico city and Vancouver -also have "#ifdef" in the source code for them to work with modified urban data -parameters that are particular to these locations. They can be turned on by using -the &CLMCONFIG; &envconf; variable to set the "-sitespf_pt" option in the &clm; -&configure;. To turn on the atmospheric forcing for these datasets, you set the -&envconf; DATM_MODE variable to "CLM1PT", and then the atmospheric -forcing datasets will be used for the point picked. - - -When running with datasets that have their own atmospheric forcing you need to be careful -to run over the period that data is available. If you have at least one year of forcing -it will cycle over the available data over and over again no matter how long of a simulation -you run. However, if you have less than a years worth of data (or if the start date doesn't -start at the beginning of the year, or the end date doesn't end at the end of the year) then -you won't be able to run over anything but the data extent. 
In this case you will need to -carefully set the RUN_STARTDATE, START_TOD and -STOP_N/STOP_OPTION variables for your case to run over the entire time extent -of your data. For the supported data points, these values are in the XML database -and you can use the queryDefaultNamelist.pl script to query the values -and set them for your case (they are set for the three urban test cases: Mexicocity, Vancouver, and -urbanc_alpha). - - -In the example below we will show how to do this for the Vancouver, Canada point. - - -Example of running &clm; over the single-point of Vancouver Canada with -supplied atmospheric forcing data for Vancouver. - - -> cd scripts -# Create a case at the single-point resolutions -> ./create_newcase -case testSPDATASETnAtmForcing -res pt1_pt1 -compset I \ --mach bluefire -> cd testSPDATASETnAtmForcing -# Set a variable to the site you want to use (as it's used several times below) -> set SITE=1x1_vancouverCAN -> ./xmlchange -file env_conf.xml -id &CLMCONFIG; -val "-sitespf_pt $SITE" -# Now set the CLM single-point variable to the site name -> ./xmlchange -file env_conf.xml -id &CLM1PT; -val $SITE -# Set the aerosols to use the single-point dataset for 2000 conditions -# You could also use the default global dataset, but running would be a bit slower -> ./xmlchange -file env_conf.xml -id DATM_MODE -val CLM1PT -# Set the coupling frequency to once an hour -> ./xmlchange -file env_conf.xml -id ATM_NCPL -val 24 -# Set the standard namelist options for an urban test site -> ./xmlchange -file env_conf.xml -id CLM_NML_USE_CASE -val stdurbpt -# Set many of the settings that are particular to this site, by values contained -# in the XML database. For some sites, or for new sites this information won't be -# stored. And the queryDefaultNamelist.pl command will abort. -# -# Set the start date -> setenv RUN_STARTDATE \ -`../../models/lnd/clm/bld/queryDefaultNamelist.pl -res $SITE \ --namelist default_settings -silent -var run_startdate -justvalue` -> setenv STARTDATE `echo $RUN_STARTDATE | sed s/-//g` -> @ START_YEAR = $STARTDATE / 10000 -> ./xmlchange -file env_conf.xml -id RUN_STARTDATE -val $RUN_STARTDATE -# Set the run length and start time of day -> ./xmlchange -file env_run.xml -id STOP_OPTION \ --val `../../models/lnd/clm/bld/queryDefaultNamelist.pl -res $SITE \ --namelist seq_timemgr_inparm -silent -var stop_option -justvalue` -> setenv STOP_N \ -`../../models/lnd/clm/bld/queryDefaultNamelist.pl -res $SITE \ --namelist seq_timemgr_inparm -silent -var stop_n -justvalue` -> ./xmlchange -file env_run.xml -id STOP_N -val $STOP_N -> ./xmlchange -file env_run.xml -id START_TOD \ --val `../../models/lnd/clm/bld/queryDefaultNamelist.pl -res $SITE \ --namelist seq_timemgr_inparm -silent -var start_tod -justvalue` -# Set &datm; start and end range... 
> ./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_START -val \
`../../models/lnd/clm/bld/queryDefaultNamelist.pl -res $SITE \
-namelist default_settings -silent -var datm_cycle_beg_year -justvalue`
> ./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_END -val \
`../../models/lnd/clm/bld/queryDefaultNamelist.pl -res $SITE \
-namelist default_settings -silent -var datm_cycle_end_year -justvalue`
# Set the User namelist to set the output frequencies of the history files
# Setting the stdurbpt use-case option creates three history file streams
# The frequencies and number of time-samples need to be set
> cat << EOF > &usernlclm;
&clm_inparm
 hist_mfilt = $STOP_N,$STOP_N,$STOP_N
 hist_nhtfrq = -1,-1,-1
/
EOF
# Set align year to start year as defined above
> ./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_ALIGN -val $START_YEAR
# Set &datm; prescribed aerosols to single-point dataset
# Will then use the dataset with just the point for this site
> ./xmlchange -file env_conf.xml -id DATM_PRESAERO -val pt1_pt1
> ./configure -case

If you don't set the start-year and run-length carefully as shown above, the
model will abort with a "dtlimit error" in the atmosphere model (see bug 1110 in
the KnownLimitations file for documentation on this). Since the forcing data for
this site (and the MexicoCity site) is less than a year, the model won't be able to
run for a full year. The 1x1_urbanc_alpha site has data for more
than a full year, but neither year is complete; hence, it has the same problem (see the
problem for this site above).

Just like &PTSMODE; above, by default it sets up to run with
USE_MPISERIAL (in the env_build.xml file) turned on,
which allows you to run the model interactively. On some machines this mode is NOT
supported and you may need to change it to FALSE before you are able to build.

See for a warning about running single-point jobs
on batch machines.

Note that when running at a pt1_pt1 resolution the number of processors
is automatically set to one. When running a single grid point you can only use a single
processor. You might also want to set the "env_conf" variable: USE_MPISERIAL to
TRUE so that you can also run interactively without having to use
mpi to start up your job.

Creating your own single-point/regional surface datasets

The file
Quickstart.userdatasets in the
models/lnd/clm/doc directory gives guidelines on how to create and run
with your own single-point or regional datasets. Below we reprint the above guide.

&quickstart_userdata;

Using getregional_datasets.pl to get a complete suite of single-point/regional
surface datasets from global ones

The getregional_datasets.pl script extracts regional
datasets of interest from the global ones.
Note: the script works on all files other than the "finidat" file, as that is a 1D vector file.
The script will extract out a block of gridpoints from all the input global datasets,
and create the full suite of input datasets to run over that block. The input datasets
will be named according to the input "id" you give them and the id can then be used
as input to &CLMUSRDAT; to create a case that uses it. See
the section on &clm; Script Configuration Items for
more information on setting &CLMUSRDAT;.
The files extracted, listed by the namelist names used for them, are:
fatmgrid, fatmlndfrc,
fsurdat, flanduse_timeseries,
stream_fldfilename_ndep, and the &datm; files
domainfile and faerdep.
For more information on these files see the Table on required files.

The alternatives to using this script are to use &PTSMODE;,
discussed earlier, to use &ptclm;, discussed in the next chapter, or to create the files
individually using the different file creation tools (given in the
Tools Chapter). Creating
all the files individually takes quite a bit of effort and time. &PTSMODE;
has some limitations as discussed earlier, and also, as it uses global files, is
a bit slower when running simulations than using files that contain just the set
of points you want to run over. Another advantage of this script is that once you've created the
files you can customize them if you have data on this specific
location to replace what's already in these files.

The script requires the use of both "Perl" and "NCL". See the NCL Script section in the Tools Chapter
on getting and using NCL and NCL scripts. The main script to use is a &perl; script
which will then in turn call the NCL script that actually creates the output files.
The NCL script gets its settings from environment variables set by the &perl; script.
To get help with the script use "-help" as follows:

> cd models/lnd/clm/tools/ncl_scripts
> ./getregional_datasets.pl -help

The output of the above is:

&getreg_datasets;

The required options are: -id, -ne, and -sw, for the output identifier
name to use in the filenames, the latitude and longitude of the NorthEast corner, and the
latitude and longitude of the SouthWest corner (in degrees). Options that specify
which files will be used are: -mask, -res,
-rcp, -sim_year, and -sim_yr_rng
for the land-mask to use, global resolution name, representative concentration pathway
for future scenarios, simulation year, and simulation year range. The location of the
input and output files will be determined by the option -mycsmdata
(it can also be set by using the environment variable $CSMDATA). If
you are running on a machine, like those at &ncar;, where you do NOT have write permission
to the CESM inputdata files, you should use the scripts/link_dirtree
script to create soft-links of the original files to a location that you can write
to. This way you can use both the new files you created as well as the original
files from the same location.

The remaining options to the script are -debug
and -verbose. -debug is used to show what
would happen if the script was run, without creating the actual files.
-verbose adds extra log output while creating the files so you
can more easily see what the script is doing.

For example, run the extraction for data from 52-73 North latitude, 190-220 longitude,
which creates a 13x12 gridcell region from the f19 (1.9x2.5) global resolution over Alaska.
- -Example of running <command>getregional_datasets.pl</command> to get -datasets for a specific region over Alaska - -> cd scripts -# First make sure you have a inputdata location that you can write to -# You only need to do this step once, so you won't need to do this in the future -> setenv MYCSMDATA $HOME/inputdata # Set env var for the directory for input data -> ./link_dirtree $CSMDATA $MYCSMDATA -> cd ../models/lnd/clm/tools/ncl_scripts -> ./getregional_datasets.pl -sw 52,190 -ne 73,220 -id 13x12pt_f19_alaskaUSA -mycsmdata $MYCSMDATA - - -Repeat this process if you need files for multiple sim_year, resolutions, land-masks, -and sim_year_range values. - - - -See for a warning about running single-point jobs -on batch machines. - - - - -See for notes about managing your data -when using link_dirtree. - - - -Now to run a simulation with the datasets created above, you create a single-point -case, and set &CLMUSRDAT; to the identifier used above. Note that in the example below -we set the number of processors to use to one (-pecount 1). For a single point, you -should only use a single processor, but for a regional grid, such as the example below -you could use up to the number of grid points (12x13=156 processors. - - -Example of using &CLMUSRDAT; to run a simulation using user datasets for a -specific region over Alaska - -> cd scripts -# Create the case and set it to only use one processor -> ./create_newcase -case my_userdataset_test -res pt1_pt1 -compset I1850 \ --mach bluefire -> cd my_userdataset_test/ -> ./xmlchange -file env_run.xml -id DIN_LOC_ROOT_CSMDATA -val $MYCSMDATA -> ./xmlchange -file env_conf.xml -id &CLMUSRDAT; -val 13x12pt_f19_alaskaUSA -> ./xmlchange -file env_conf.xml -id &CLMBLDNML; -val '-mask gx1v6' -> ./xmlchange -file env_conf.xml -id &CLM1PT; -val 13x12pt_f19_alaskaUSA -> ./configure -case - - - - - - - -Running with your own atmosphere forcing - -Here we want to run with our own customized datasets for &clm; as well as -running with our own supplied atmosphere forcing datasets. Thus we effectively -combine the information from with -. First we need to follow -the procedures in to come up with &clm; -datasets that are customized for our point or region in question. This includes -running link_dirtree to create a directory location where you -can add your own files to it. Next, set -DATM_MODE to "CLM1PT" and &CLM1PT; and &CLMUSRDAT; to the -id of the data you created. To see a list of what the filenames need to be -see the section on setting &CLMUSRDAT;. - - -Next we need to setup the atmosphere forcing data in &netcdf; format that can be -read by &datm;. There is a list of eight variables that are expected to be on the input -files with the names and units on the following table (in the table TDEW and SHUM -are optional fields that can be used in place of RH). In the table we also list -which of the fields are required and if not required what the code will do to -replace them. If the names of the fields are different or the list is changed -from the standard list of eight fields: FLDS, FSDS, PRECTmms, -PSRF, RH, TBOT, WIND, and ZBOT, the resulting streams file will need to be modified -to take this into account (see an example streams file for this in below). - -Atmosphere Forcing Fields - - - - Short-name - Description - Units - Required? 
If NOT required, how replaced

FLDS      incident longwave (FLDS)                        W/m2             No                        calculated based on Temperature, Pressure and Humidity
FSDS      incident solar (FSDS)                           W/m2             Yes                       -
FSDSdif   incident solar (FSDS), diffuse                  W/m2             No                        based on FSDS
FSDSdir   incident solar (FSDS), direct                   W/m2             No                        based on FSDS
PRECTmms  precipitation (PRECTmms)                        mm/s             Yes                       -
PSRF      pressure at the lowest atm level (PSRF)         Pa               No                        assumes standard pressure
RH        relative humidity at the lowest atm level (RH)  %                No                        can be replaced with SHUM or TDEW
SHUM      specific humidity at the lowest atm level       kg/kg            Optional in place of RH   can be replaced with RH or TDEW
TBOT      temperature at the lowest atm level (TBOT)      K (or can be C)  Yes                       -
TDEW      dew point temperature                           K (or can be C)  Optional in place of RH   can be replaced with RH or SHUM
WIND      wind at the lowest atm level (WIND)             m/s              Yes                       -
ZBOT      observational height                            m                No                        assumes 30 meters
-All of the variables should be dimensioned: time, lat, lon, with time being the unlimited -dimension. The coordinate variable "time" is also required with CF-compliant units in -days, hours, minutes, or seconds. It can also have a calendar attribute that can -be "noleap" or "gregorian". Normally the files will be placed in the: -$MYCSMDATA/atm/datm7/CLM1PT_data/$MYUSRDAT directory with separate files per -month called YYYY-MM.nc where YYYY-MM corresponds to the four -digit year and two digit month with a dash in-between. You also need a domain file that -gives the coordinate information for the data that should be placed in: -$MYCSMDATA/atm/datm7/domain.lnd.$MYUSRDAT_USGS.nc. - -Example of setting up a case with your own atmosphere forcing - -> cd scripts -# First make sure you have a inputdata location that you can write to -# You only need to do this step once, so you won't need to do this in the future -> setenv MYCSMDATA $HOME/inputdata # Set env var for the directory for input data -> ./link_dirtree $CSMDATA $MYCSMDATA -# Next create and move all your datasets into $MYCSMDATA with id $MYUSRDAT -# See above for naming conventions - -# Now create a single-point case -> ./create_newcase -case my_atmforc_test -res pt1_pt1 -compset I1850 \ --mach bluefire -> cd my_atmforc_test -# Set the data root to your inputdata directory, and set &CLM1PT; and &CLMUSRDAT; -# to the user id you created for your datasets above -> ./xmlchange -file env_run.xml -id DIN_LOC_ROOT_CSMDATA -val $MYCSMDATA -> ./xmlchange -file env_conf.xml -id &CLM1PT; -val $MYUSRDAT -> ./xmlchange -file env_conf.xml -id &CLMUSRDAT; -val $MYUSRDAT -# Set the land-mask to USGS, so both clm and &datm; can find files -> ./xmlchange -file env_conf.xml -id &CLMBLDNML; -val '-mask USGS' -# Then set DATM_MODE to single-point mode so &datm; will use your forcing datasets -# Put your forcing datasets into $MYCSMDATA/atm/datm7/CLM1PT_data/$MYUSRDAT -> ./xmlchange -file env_conf.xml -id DATM_MODE -val CLM1PT -> ./configure -case -# If the list of fields, or filenames, filepaths, or fieldnames are different -# you'll need to edit the &datm; namelist streams file to make it consistent -> $EDITOR Buildconf/datm.buildnml.csh - - -
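Before configuring a case against your own forcing data, it can save time to verify
that the files follow the conventions above. A minimal check with the standard
&netcdf; ncdump utility (the path and the 2004-01.nc file name here are just
illustrative):

> ncdump -h $MYCSMDATA/atm/datm7/CLM1PT_data/$MYUSRDAT/2004-01.nc
# In the printed header confirm that: "time" is the unlimited dimension;
# each field (FSDS, PRECTmms, TBOT, WIND, etc.) is dimensioned (time, lat, lon);
# and the "time" units attribute reads like "days since 2004-01-01 00:00:00",
# optionally with a "noleap" or "gregorian" calendar attribute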
- - -See for a warning about running single-point jobs -on batch machines. - - - - -See for notes about managing your data -when using link_dirtree. - - - - -Now, we'll show an example of what the &datm; streams file might look like for a case -with your own forcing data with 3-hourly forcing. In this example, we'll leave off the -fields: ZBOT, and FLDS so they'll be calculated as given in the - table above. We'll also include: -FSDSdif and FSDSdir which aren't required, and we'll use TDEW in place of RH. In this -example the datafiles are in &netcdf; format and contain the fields: TA, Tdew, WS, -PREC, Rg, Rgdir, Rgdif, and PRESS which are translated into the &datm; internal names -in this streams file. There is also a domain file that has the position information -for this location. The normal assumption for CLM1PT mode in the &datm; is that data is -hourly or half-hourly and as such is often enough that using the data on the nearest -time-stamp is reasonable and as such the data is in a single streams file (see - for more information on -the default settings for &datm; and how to change them. If the data is less often three to six hours -- see -below, where you will need to modify the time-interpolation method as well as the -time stamp offsets. In the example below we also have to divide the single -stream file into three files to manage the time-stamps and time interpolation -algorithm for the different types of data differently. - -Example of &datm; streams files with your own forcing for 3-hourly data - -Precipitation streams file -(clm1PT.1x1pt_lapazMEX.precip.stream.txt file) . - - -<streamstemplate> -<stream> - <dataSource> - CLMNCEP - </dataSource> - <domainInfo> - <variableNames> - time time - xc lon - yc lat - area area - mask mask - </variableNames> - <filePath> - $DIN_LOC_ROOT/atm/datm7/domain.clm - </filePath> - <fileNames> - domain.lnd.1x1pt_lapazMEX_navy.nc - </fileNames> - </domainInfo> - <fieldInfo> - <variableNames> - PRECTmms PREC - </variableNames> - <offset> - -5400 - </offset> - <filePath> - $DIN_LOC_ROOT/atm/datm7/CLM1PT_data/1x1pt_lapazMEX - </filePath> - <fileNames> - 2004-01.nc - 2004-02.nc - 2004-03.nc -. -. -. - 2009-12.nc - </fileNames> - </fieldInfo> -</stream> -</streamstemplate> - - -Solar streams file (clm1PT.1x1pt_lapazMEX.solar.stream.txt file). - - -<streamstemplate> -<stream> - <dataSource> - CLMNCEP - </dataSource> - <domainInfo> - <variableNames> - time time - xc lon - yc lat - area area - mask mask - </variableNames> - <filePath> - $DIN_LOC_ROOT/atm/datm7/domain.clm - </filePath> - <fileNames> - domain.lnd.1x1pt_lapazMEX_navy.nc - </fileNames> - </domainInfo> - <fieldInfo> - <variableNames> - FSDS Rg - FSDSdir Rgdir - FSDSdif Rgdif - </variableNames> - <offset> - -10800 - </offset> - <filePath> - $DIN_LOC_ROOT/atm/datm7/CLM1PT_data/1x1pt_lapazMEX - </filePath> - <fileNames> - 2004-01.nc - 2004-02.nc - 2004-03.nc -. -. -. - 2009-12.nc - </fileNames> - </fieldInfo> -</stream> -</streamstemplate> - - -Other fields streams file. -(clm1PT.1x1pt_lapazMEX.other.stream.txt file) . 
- - -<streamstemplate> -<stream> - <dataSource> - CLMNCEP - </dataSource> - <domainInfo> - <variableNames> - time time - xc lon - yc lat - area area - mask mask - </variableNames> - <filePath> - $DIN_LOC_ROOT/atm/datm7/domain.clm - </filePath> - <fileNames> - domain.lnd.1x1pt_lapazMEX_navy.nc - </fileNames> - </domainInfo> - <fieldInfo> - <variableNames> - TBOT TA - TDEW Tdew - WIND WS - PSRF PRESS - </variableNames> - <offset> - -5400 - </offset> - <filePath> - $DIN_LOC_ROOT/atm/datm7/CLM1PT_data/1x1pt_lapazMEX - </filePath> - <fileNames> - 2004-01.nc - 2004-02.nc - 2004-03.nc -. -. -. - 2009-12.nc - </fileNames> - </fieldInfo> -</stream> -</streamstemplate> - - -Example streams namelist for the above streams files: - - - &shr_strdata_nml - dataMode = 'CLMNCEP' - domainFile = '$DOMAINFILE' - streams = 'clm1PT.1x1pt_lapazMEX.solar.stream.txt 1 2004 2009 ', - 'clm1PT.1x1pt_lapazMEX.precip.stream.txt 1 2004 2009 ', - 'clm1PT.1x1pt_lapazMEX.other.stream.txt 1 2004 2009 ', - 'presaero.stream.txt 1 2000 2000' - vectors = 'null','null','null','null' - mapmask = 'nomask','nomask','nomask','nomask' - mapalgo = 'nn','nn','nn','nn' - tintalgo = 'coszen','nearest','linear','linear' - taxmode = 'cycle','cycle','cycle','cycle' - / - - - - - -The example above shows the resolved namelist and streams file after &configure; -has been run. In order to save this configuration for future use, you would need -to edit the &datm; template adding new DATM_MODE see - for more information on how to do this. - - - - -We've outlined and given a few examples of using your own atmosphere -forcing. In the next chapter we go into the details of using &ptclm;. - - -
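A note on the offsets in the streams files above: assuming the forcing values are
time-stamped at the end of each 3-hourly averaging interval, the -5400 second offset
presumably shifts the precipitation and state-variable time stamps back by half an
interval, so the "nearest" and "linear" interpolation algorithms see mid-interval
times, while the -10800 second offset shifts the solar time stamps back a full
interval, since the "coszen" algorithm expects time stamps at the beginning of the
averaging period. If your data are stamped differently (for example, at mid-interval),
the offsets would change accordingly.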
- -
diff --git a/doc/UsersGuide/special_cases.xml b/doc/UsersGuide/special_cases.xml
deleted file mode 100644
index ac5c7fef3b..0000000000
--- a/doc/UsersGuide/special_cases.xml
+++ /dev/null
@@ -1,916 +0,0 @@

How to run some special cases

In this chapter we describe how to run some special cases that take more than one step
to do. The straightforward cases have compsets and/or build-namelist use-cases set up for
them, or require simple editing of a single case. All of the cases here require you
to do at least two simulations with different configurations, or require more complex
editing of the case (changing the streams files).

The nine cases we will describe are:

Running with the prognostic crop model on

Running with the irrigation model on

Spinning up the Satellite Phenology Model (&clmsp; spinup)

Spinning up the biogeochemistry Carbon-Nitrogen Model (CN spinup)

Spinning up the Carbon-Nitrogen Dynamic Global Vegetation Model (CNDV spinup)

Running with MOAR data as atmospheric forcing to spin up the model

Running with your own previous simulation as atmospheric forcing to spin up the model

Doing perturbation error growth tests

Running stand-alone &clm; with transient historical &CO2;
concentration

The cases in this chapter are more sophisticated and require more technical knowledge
and skill than cases in previous chapters. The user should be very familiar with doing
simple cases before moving on to the cases described here.

Running with the prognostic crop model on

In &clmcesm103; a prognostic crop model was added to &clm4;. The prognostic crop
model is set up to work with CN for present-day conditions and we have surface
and initial condition datasets at f19 resolution. In order to use the initial condition
file, we need to set the RUN_TYPE to startup rather
than hybrid since the compset for f19 sets up to use an initial
condition file without crop active. To activate the crop model we simply add "-crop on"
to &CLMCONFIG;.

Example Crop Simulation

> cd scripts
> ./create_newcase -case CROP -res f19_g16 -compset ICN -mach bluefire
> cd CROP
# Append "-crop on" to &CLMCONFIG; in env_conf.xml (you could also use an editor)
> ./xmlchange -file env_conf.xml -id &CLMCONFIG; -val "-crop on" -append
# Change to startup type so it uses the spun-up initial conditions file for crop if it exists
# By default the model will do a hybrid startup with an initial condition file
# incompatible with the crop surface dataset.
> ./xmlchange -file env_conf.xml -id RUN_TYPE -val startup
> ./configure -case
# Now build and run normally
> ./CROP.bluefire.build
> ./CROP.bluefire.submit

Running with the irrigation model on

In &clmcesm103; an irrigation model for generic crop was added to &clm4;. Currently,
irrigation and crop can NOT be used together; see bug number 1326 in the
&KnownBugs; file.
The irrigation model is tuned to work only with &clmsp;; see the caution below
for more information on this. To turn on
irrigation we simply add "-irrig" to &CLMBLDNML;. Just as in the crop example we
also change RUN_TYPE to startup so that we don't use
an initial condition file that is incompatible with irrigation.
-
-Example Irrigation Simulation
-
-> cd scripts
-# Note here we do a &clmsp; simulation as that is what has been validated
-> ./create_newcase -case IRRIG -res f19_g16 -compset I -mach bluefire
-> cd IRRIG
-# Append "-irrig on" to &CLMBLDNML; in env_conf.xml (you could also use an editor)
-> ./xmlchange -file env_conf.xml -id &CLMBLDNML; -val "-irrig on" -append
-# Change to startup type so it uses the spun-up initial conditions file for irrigation if it exists
-# By default the model will do a hybrid startup with an initial condition file
-# incompatible with the irrigation surface dataset.
-> ./xmlchange -file env_conf.xml -id RUN_TYPE -val startup
-> ./configure -case
-# Now build and run normally
-> ./IRRIG.bluefire.build
-> ./IRRIG.bluefire.submit
-
-We have only run the irrigation model with &clmsp; (i.e. without the CN model). We
-recommend that if you want to run the irrigation model with CN, you do a spinup
-as outlined in the examples below. But, more than that, you may need to make
-further adjustments as well.
-
-Spinning up the Satellite Phenology Model (&clmsp; spinup)
-
-To spin up the &clmsp; model you merely need to run &clmsp; for 50 simulation
-years starting from arbitrary initial conditions. You then use the final
-restart file for initial conditions in other simulations.
-Because this is a straightforward operation we will NOT give
-the details on how to do that here, but leave it as an exercise for the reader.
-See the final spinup example below for an example of doing this
-as the last step for &clmcn;.
-
-Spinning up the biogeochemistry Carbon-Nitrogen Model (CN spinup)
-
-To get the &clmcn; model to a steady state, you first run it from arbitrary initial conditions
-using the "accelerated decomposition spinup" (-ad_spinup in configure) mode for 600 simulation years. After
-this you branch from this mode in the "exit spinup" (-exit_spinup in configure), run
-for a simulation year, and then save a restart from that and use it as initial conditions
-for further spinup of CN (at least 50 simulation years).
-
-Spinup of &clmcn;
-
-AD_SPINUP
-
-For the first step of running 600 years in "-ad_spinup" mode, you will set up
-a case, and then edit the values in env_conf.xml and
-env_run.xml so that the right configuration is turned on and
-the simulation is set up to run for the required length of simulation time.
-So do the following:
-
-Example AD_SPINUP Simulation
-
-> cd scripts
-> ./create_newcase -case CN_spinup -res f19_g16 -compset ICN -mach bluefire
-> cd CN_spinup
-# Append "-ad_spinup on" to &CLMCONFIG; in env_conf.xml
-> ./xmlchange -file env_conf.xml -id &CLMCONFIG; -val "-ad_spinup on" -append
-# The following sets &CLMFORCECOLD; to "on" in env_conf.xml (you could also use an editor)
-> ./xmlchange -file env_conf.xml -id &CLMFORCECOLD; -val on
-# Make the output history files only annual, by adding the following to the &usernlclm; namelist
-> echo '&clm_inparm hist_nhtfrq = -8760 /' > &usernlclm;
-# Now configure
-> ./configure -case
-# The following sets STOP_DATE to Jan/1 of year 601 in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id STOP_DATE -val 6010101
-# Now build
-> ./CN_spinup.bluefire.build
-# The following sets RESUBMIT to 30 times in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id RESUBMIT -val 30
-# The following sets STOP_OPTION to "nyears" in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id STOP_OPTION -val nyears
-# The following sets STOP_N to 20 years in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id STOP_N -val 20
-# Now run normally
-> ./CN_spinup.bluefire.submit
-
-Afterwards save the last restart file from this simulation to use in the next step.
-
-EXIT_SPINUP
-
-Example EXIT_SPINUP Simulation
-
-> cd scripts
-> ./create_newcase -case CN_exitspinup -res f19_g16 -compset ICN -mach bluefire
-> cd CN_exitspinup
-# Append "-exit_spinup on" to &CLMCONFIG; in env_conf.xml
-> ./xmlchange -file env_conf.xml -id &CLMCONFIG; -val "-exit_spinup on" -append
-# Change run type to branch and branch from the last year of the last simulation
-> ./xmlchange -file env_conf.xml -id RUN_TYPE -val branch
-> ./xmlchange -file env_conf.xml -id RUN_REFCASE -val CN_spinup
-> ./xmlchange -file env_conf.xml -id RUN_REFDATE -val 0601-01-01
-> ./xmlchange -file env_conf.xml -id GET_REFCASE -val FALSE
-> ./configure -case
-# Go ahead and build, so that the run directory is created
-> ./CN_exitspinup.bluefire.build
-# Now, copy the last restart files from the earlier case into your run directory
-> cp /ptmp/$LOGIN/archive/CN_spinup/rest/CN_spinup.*.r*.0601-01-01-00000* /ptmp/$LOGIN/CN_exitspinup
-# And copy the rpointer files for datm and drv from the earlier case
-> cp /ptmp/$LOGIN/archive/CN_spinup/rest/rpointer.atm /ptmp/$LOGIN/CN_exitspinup
-> cp /ptmp/$LOGIN/archive/CN_spinup/rest/rpointer.drv /ptmp/$LOGIN/CN_exitspinup
-# The following sets STOP_OPTION to "nyears" in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id STOP_OPTION -val nyears
-> ./xmlchange -file env_run.xml -id STOP_N -val 1
-# Now run normally
-> ./CN_exitspinup.bluefire.submit
-
-Final spinup
-
-Next save the last restart file from this step and use it as the "finidat" file
-for one more spinup of at least 50 years in normal mode.
-So do the following:
-
-Example Final CN Spinup Simulation
-
-> cd scripts
-> ./create_newcase -case CN_finalspinup -res f19_g16 -compset ICN -mach bluefire
-> cd CN_finalspinup
-# The following sets &CLMFORCECOLD; to "on" in env_conf.xml (you could also use an editor)
-> ./xmlchange -file env_conf.xml -id &CLMFORCECOLD; -val on
-# Now, copy the last &clm; restart file from the earlier case into your run directory
-> cp /ptmp/$LOGIN/archive/CN_exitspinup/rest/CN_exitspinup.clm*.r*.0602-01-01-00000.nc \
-/ptmp/$LOGIN/CN_finalspinup
-# And copy the rpointer files for datm and drv from the earlier case
-> cp /ptmp/$LOGIN/archive/CN_exitspinup/rest/rpointer.atm /ptmp/$LOGIN/CN_finalspinup
-> cp /ptmp/$LOGIN/archive/CN_exitspinup/rest/rpointer.drv /ptmp/$LOGIN/CN_finalspinup
-# Set the finidat file to the last restart file saved in previous step
-> echo '&clm_inparm finidat = "CN_exitspinup.clm2.r.0602-01-01-00000.nc" /' > &usernlclm;
-# Now configure
-> ./configure -case
-# (Optionally verify that finidat was picked up in the namelist)
-> $EDITOR Buildconf/clm.buildnml.csh
-# Now build
-> ./CN_finalspinup.bluefire.build
-# The following sets RESUBMIT to 5 times in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id RESUBMIT -val 5
-# The following sets STOP_OPTION to "nyears" in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id STOP_OPTION -val nyears
-# The following sets STOP_N to 10 years in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id STOP_N -val 10
-# Now run as normal
-> ./CN_finalspinup.bluefire.submit
-
-To assess whether the model is spun up, plot trends of CN variables of interest. If you see
-a trend, you may need to run the simulation longer.
-Finally save the restart file from the end of this simulation to use as a "finidat" file for future
-simulations.
-
-Spinning up the Carbon-Nitrogen Dynamic Global Vegetation Model (CNDV spinup)
-
-To spin up the &clm; CNDV model -- you first follow the procedures above to spin up the CN model.
-Then you take the CN initial state file you created for the spinup with just CN, and
-run CNDV for 200 more years.
-We've provided such spun-up files for two resolutions (f09 and f19) and two time-periods
-(1850 and 2000), so in this example we will use the files provided to start from.
-We've also provided a spinup file at f19 resolution for CNDV, hence the following is
-NOT required when running at f19.
-If you were to start from your own &clmcn; spun-up files -- the procedure would require
-some modification.
-There are no compsets using CNDV, so in
-env_conf.xml change CLM_CONFIG_OPTS to
--bgc cndv.
-
-Example CNDV Spinup Simulation
-
-> cd scripts
-> ./create_newcase -case CNDV_spinup -res f09_g16 -compset ICN -mach bluefire
-> cd CNDV_spinup
-# Set run type to startup and do a cold start
-> ./xmlchange -file env_conf.xml -id RUN_TYPE -val startup
-# The following sets CLM_CONFIG_OPTS to "-bgc cndv" in env_conf.xml (you could also use an editor)
-> ./xmlchange -file env_conf.xml -id CLM_CONFIG_OPTS -val "-bgc cndv"
-# Make the default primary history file annual and add an annual 1D vector auxiliary file
-# By putting the following in a &usernlclm; file.
-> cat << EOF > &usernlclm;
-&clm_inparm
- hist_nhtfrq = -8760, -8760
- hist_mfilt  = 1, 1
- hist_fincl2 = 'TLAI', 'TSAI', 'HTOP', 'HBOT', 'NPP'
- hist_dov2xy = .true., .false.
-/
-EOF
-> ./configure -case
-# NOTE: If you were using your own CN spinup files you would edit the namelist to use it
-# $EDITOR Buildconf/clm.buildnml.csh
-#
-# Now build and run as normal
-> ./CNDV_spinup.bluefire.build
-# The following sets RESUBMIT to 10 times in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id RESUBMIT -val 10
-# The following sets STOP_OPTION to "nyears" in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id STOP_OPTION -val nyears
-# The following sets STOP_N to 20 years in env_run.xml (you could also use an editor)
-> ./xmlchange -file env_run.xml -id STOP_N -val 20
-# Make sure you turn archiving on, so you save your files to long term archival
-> ./xmlchange -file env_run.xml -id DOUT_L_MS -val TRUE
-> ./CNDV_spinup.bluefire.submit
-
-There is a build bug with &clmcesm103; -- see bug 1370 in the &KnownBugs; file on
-how to address this.
-
-In a data analysis tool you should open the auxiliary file and examine the
-pfts1d_wtgcell field to see where and what types of vegetation have
-been established. See the earlier caution on visualizing and analyzing 1D vector
-fields for more information.
-
-CNDV also writes out two vector fields to "hv" auxiliary files, on an annual basis by
-default.
-
-We've provided a spinup file for CNDV at f19 resolution; you could also use
-interpinic to interpolate this file to other resolutions.
-
-Running with MOAR data as atmospheric forcing to spinup the model
-
-Because it takes so long to spin up the CN model (as we just saw previously), if you
-are doing fully coupled simulations with active atmosphere and ocean, you will want
-to do the spinup portion of this "offline". So instead of doing expensive fully
-coupled simulations for the spinup duration, you run &clm; in a very cheap "I"
-compset using atmospheric forcing from a shorter fully coupled simulation
-(or a simulation run previously by someone else).
-
-In this example we will use the I1850SPINUPCN compset to set up
-&clm; to run with atmospheric forcing from a previous fully coupled simulation with
-data that is already stored on disk on bluefire. There are several simulations that
-have high frequency data for which we can do this. You can also do this on a machine
-other than bluefire, but you would need to download the data from the Earth System Grid and
-change the datapath, similar to the next example below.
-This compset is designed for constant
-1850 conditions, but unfortunately (because of bug 1354, see the &KnownBugs; file) by
-default it points to a transient simulation instead of an 1850 simulation. Here we
-point to an 1850 simulation and set up the forcing years to run over.
- -Example Simulation with MOAR Data on bluefire - -> cd scripts -> ./create_newcase -case MOARforce1850 -res f19_g16 -compset I1850SPINUPCN -mach bluefire -> cd MOARforce1850 -# The following sets the casename to point to for atm forcing (you could also use an editor) -> ./xmlchange -file env_conf.xml -id DATM_CPL_CASE -val b40.1850.track1.1deg.006a -# The following sets the align year and years to run over for atm forcing -# (you could also use an editor) -> ./xmlchange -file env_conf.xml -id DATM_CPL_YR_ALIGN -val 1 -> ./xmlchange -file env_conf.xml -id DATM_CPL_YR_START -val 960 -> ./xmlchange -file env_conf.xml -id DATM_CPL_YR_END -val 1030 -> ./configure -case -# Now build and run as normal -> ./MOARforce1850.bluefire.build -> ./MOARforce1850.bluefire.submit - - - - -Because of bug 1339 (see the &KnownBugs; file on this) -you can't run with 83 or more years of forcing. If you do need to run with more years of -forcing, you'll need to address the issue as outlined in the &KnownBugs; file. - - - - - - -Running with your own previous simulation as atmospheric forcing to spinup the model - -Another way that you might want to spinup the model is to run your own simulation -for a relatively short period (either a B, E, or F compset) and then use it as forcing -for your "I" case later. By only running 20 to 50 years for the fully coupled case, -you'll save a substantial amount of computer time rather than running the entire spinup -period with a fully coupled model. - - -The first thing we need to do is to run a fully coupled case and save the atmospheric -coupling fields on a three hourly basis. In this example, we will run on bluefire -and archive the data to a local disk that we can then use in the next simulation. - -Example Fully Coupled Simulation to Create Data to Force Next Example Simulation - -> cd scripts -> ./create_newcase -case myBCN1850 -res f09_g16 -compset B1850CN -mach bluefire -> cd myBCN1850 -> ./configure -case -# Set histaux_a2x3hr to .true. in cpl.buildnml.csh so output from the atmosphere model -# will be saved 3 hourly -$EDITOR BuildConf/cpl.buildnml.csh -# Now build -> ./myBCN1850.bluefire.build -# The following sets the archival disk space (you could also use an editor) -> ./xmlchange -file env_run.xml -id DOUT_S_ROOT -val '/glade/home/$USER/$CASE' -# Make sure files are archived to disk, but NOT to long term storage -# (you could also use an editor) -> ./xmlchange -file env_run.xml -id DOUT_S -val TRUE -> ./xmlchange -file env_run.xml -id DOUT_L_MS -val FALSE -# Set the run length to run a total of 20 years (you could also use an editor) -> ./xmlchange -file env_run.xml -id RESUBMIT -val 9 -> ./xmlchange -file env_run.xml -id STOP_OPTION -val nyears -> ./xmlchange -file env_run.xml -id STOP_N -val 2 -# Now run as normal -> ./myBCN1850.bluefire.submit - - - - -Now we run an I compset forced with the data from the previous simulation using -the &CPLHIST; option to DATM_MODE. See - for more information on the -&datm; settings for &CPLHIST; mode. 
-
-Example Simulation Forced with Data from the Previous Simulation
-
-> cd scripts
-> ./create_newcase -case frcwmyBCN1850 -res f09_g16 -compset I1850SPINUPCN -mach bluefire
-> cd frcwmyBCN1850
-# The following sets the casename to point to for atm forcing (you could also use an editor)
-> ./xmlchange -file env_conf.xml -id DATM_CPL_CASE -val "myBCN1850"
-# The following sets the align year and years to run over for atm forcing
-# (you could also use an editor)
-> ./xmlchange -file env_conf.xml -id DATM_CPL_YR_ALIGN -val "1"
-> ./xmlchange -file env_conf.xml -id DATM_CPL_YR_START -val "1"
-> ./xmlchange -file env_conf.xml -id DATM_CPL_YR_END -val "20"
-# Set the datapath in the template to the archival path from the case above
-> sed -E 's#set datapath = ".+"#set datapath = "/glade/home/$USER/%c/cpl/hist"#' \
-    Tools/Templates/datm.cpl7.template > new.datm.cpl7.template
-> mv -f new.datm.cpl7.template Tools/Templates/datm.cpl7.template
-> chmod +x Tools/Templates/datm.cpl7.template
-> ./configure -case
-# Now build and run as normal
-> ./frcwmyBCN1850.bluefire.build
-> ./frcwmyBCN1850.bluefire.submit
-
-In order to accomplish this we needed to edit the &datm; template file. See
-the section on editing the &datm; template for more information on doing this.
-If your input case was at a resolution besides f09 you would have to edit
-the &datm; template file even further to use a domain file at the input resolution.
-
-Doing perturbation error growth tests
-
-Doing perturbation error growth tests is a way to validate a port of
-the model to a new machine or to verify that changes are only roundoff.
-The steps are the same in either case, but in the discussion below we will
-assume you are doing a port validation to a new machine (but in parentheses
-we will put a reminder that it could also be for code-mods).
-The basic idea is to run a case on the trusted machine (trusted code) and
-another with initial conditions perturbed by roundoff, and compare the results of
-the two. The difference between these two simulations (the error) will grow over time
-and describe a curve that we compare with the error growth on the new machine (code
-changes). The error growth on the new machine is the difference between the non-perturbed
-state on the trusted machine and the non-perturbed state on the new machine (code
-changes). If the new machine (code changes) is well-behaved,
-the plot of this error growth compared to the error growth curve on the trusted machine
-should be similar. If the
-changes are NOT well-behaved, the changes from the new machine (code changes) will be
-larger than the perturbation changes. In summary the simulations and steps that need to be performed are:
-
-Run a simulation with the trusted code on the trusted machine.
-(optionally, you can use a dataset from the inputdata repository).
-
-Run a simulation with the trusted code on the trusted machine with initial conditions
-perturbed by roundoff (using a namelist item to do so).
-(this is optional if you are using inputdata repository datasets)
-
-Run a simulation with the new code on the non-trusted machine (code changes).
-
-Do a plot of the RMS difference of history variables between simulation 1 and simulation 2.
-
-Do a plot of the RMS difference of history variables between simulation 1 and simulation 3.
-
-Compare the two plots in steps 4 and 5.
-
-If the plots compare well, the new machine (code changes) is running as well as the trusted machine.
-
-
-If the plots do NOT compare well, the new machine is
-NOT running as well as the trusted machine. Typically the
-recommendation here is to lower the optimization level on the new machine and try
-again (or in the case of code changes, modify or simplify the code changes to get
-something that should be closer).
-
-The history variables we have used to do this are 'TSOI' and/or 'TSA'. 'TSOI' is
-the 3D snow and soil temperature for vegetated land-units. If there is a change in
-soil physics it should show up in this field (and it should show up even for something
-that is at a pretty deep soil depth). However, as 'TSOI' is only for vegetated
-land-units, changes in lake or urban land-units will NOT show up. 'TSA' by contrast is
-the 2m surface temperature across all land-units, so changes in urban or lake land-units
-will show up. However, changes in deep soil physics will only show up as they propagate
-to the surface. So one field may show something that the other doesn't. In the examples,
-we use 'TSOI', but 'TSA' can be used as well. And in most cases you should check both.
-
-Now we will give a detailed description of the procedure with examples and the
-exact steps to perform.
-
-Using Perturbation Error Growth Analysis to Verify a Port to a New Machine
-
-Running non-perturbed on trusted machine
-
-The first step is to run a non-perturbed case on the trusted machine. You need to run
-all of the steps with the same compset and same resolution. For these examples we will
-use 2-degree resolution with the ICN compset for 2000 conditions. You need to run for
-three days with a cold-start.
-
-As we describe below, this is optional if you will be using datasets from the
-inputdata repository to take the place of this step.
-
-Example non-perturbed error growth simulation
-
-> cd scripts
-> ./create_newcase -case trustedMachinePergro0 -compset ICN -res f19_g16 \
--mach bluefire
-> cd trustedMachinePergro0
-# Set the non-perturbed PERGRO use-case
-> ./xmlchange -file env_conf.xml -id CLM_NML_USE_CASE -val pergro0_pd
-# Set coldstart on so arbitrary initial conditions will be used
-> ./xmlchange -file env_conf.xml -id CLM_FORCE_COLDSTART -val on
-> ./xmlchange -file env_conf.xml -id RUN_TYPE -val startup
-# Set PERGRO on in the configure
-> ./xmlchange -file env_conf.xml -id &CLMCONFIG; -val "-pergro on" -append
-# Now configure and build
-> ./configure -case
-> ./trustedMachinePergro0.bluefire.build
-# Set it to run for three days and turn archiving off
-> ./xmlchange -file env_run.xml -id STOP_N -val 3
-> ./xmlchange -file env_run.xml -id DOUT_S -val FALSE
-# Run the case and then you will save the history file output for later use
-> ./trustedMachinePergro0.bluefire.submit
-
-If you aren't able to do this step, as you don't have access to a trusted machine, you
-can use datasets that are available from the svn inputdata repository to take the place of
-running it yourself. The disadvantage is that this is only done for certain model
-versions and for exactly the configuration/namelist given here. You won't be able to
-test it for your own custom code or configurations.
-
-Running perturbed on the trusted machine
-
-The next step is to run a perturbed case on the trusted machine.
- -Example perturbed error growth simulation - -> cd scripts -> ./create_newcase -case trustedMachinePergroRnd -compset ICN -res f19_g16 \ --mach bluefire -> cd trustedMachinePergroRnd -# Set the perturbed PERGRO use-case -> ./xmlchange -file env_conf.xml -id CLM_NML_USE_CASE -val pergro_pd -# Set coldstart on so arbitrary initial conditions will be used -> ./xmlchange -file env_conf.xml -id RUN_TYPE -val startup -> ./xmlchange -file env_conf.xml -id CLM_FORCE_COLDSTART -val on -# Set PERGRO on in the configure -> ./xmlchange -file env_conf.xml -id &CLMCONFIG; -val "-pergro on" -append -# Now configure and build -> ./configure -case -> ./trustedMachinePergroRnd.bluefire.build -# Set it to run for three days and turn archiving off -> ./xmlchange -file env_run.xml -id STOP_N -val 3 -> ./xmlchange -file env_run.xml -id DOUT_S -val FALSE -# Run the case and then you will save the history file output for later use -> ./trustedMachinePergroRnd.bluefire.submit - - - - - -If you aren't able to do this step, as you don't have access to a trusted machine, you -can use datasets that are available from the svn inputdata repository to take place of -running it yourself. The disadvantage is that this is only done for certain model -versions and for exactly the configuration/namelist given here. You won't be able to -test it for your own custom code or configurations. - - - - -Running non-perturbed on the new machine - -The next step is to run a non-perturbed case on the new machine. Here -we will demonstrate using the machine intrepid. For the previous two steps -you have the option of using datasets provided in the subversion inputdata -repository to take their place -- however this step is required. - -> cd scripts -> ./create_newcase -case newMachinePergro0 -compset ICN -res f19_g16 \ --mach intrepid -> cd newMachinePergro0 -# Set the non-perturbed PERGRO use-case -> ./xmlchange -file env_conf.xml -id CLM_NML_USE_CASE -val pergro0_pd -> ./xmlchange -file env_conf.xml -id CLM_FORCE_COLDSTART -val on -> ./xmlchange -file env_conf.xml -id RUN_TYPE -val startup -# Set PERGRO on in the configure -> ./xmlchange -file env_conf.xml -id &CLMCONFIG; -val "-pergro on" -append -# Now configure and build -> ./configure -case -> ./newMachinePergro0.intrepid.build -# Set it to run for three days and turn archiving off -> ./xmlchange -file env_run.xml -id STOP_N -val 3 -> ./xmlchange -file env_run.xml -id DOUT_S -val FALSE -# Run the case and then you will save the history file output for later use -> ./newMachinePergro0.intrepid.submit - - - - - -Plotting the differences - -You can use the cprnc program to compute root mean square differences -between the relevant history files. See for more information -on it and how to build it. On many platforms you will need to set some environment -variables in order to complete the build (see for -more information on building the tools). 
-
-# Build the cprnc program
-> cd models/lnd/clm/tools/cprnc
-> gmake
-# Now go to your case directory and run cprnc on the trusted-machine with and without
-# perturbation
-> cd ../../../../../scripts/trustedMachinePergro0
-> ../../models/lnd/clm/tools/cprnc/cprnc trustedMachinePergro0.clm2.h0.001-01-01.00000.nc \
-../trustedMachinePergroRnd/trustedMachinePergroRnd.clm2.h0.001-01-01.00000.nc > trustedPergro.log
-# Copy the history file from the new machine to here
-#
-# And now run cprnc on the trusted-machine and the new machine both without perturbation
-> ../../models/lnd/clm/tools/cprnc/cprnc trustedMachinePergro0.clm2.h0.001-01-01.00000.nc \
-../newMachinePergro0/newMachinePergro0.clm2.h0.001-01-01.00000.nc > newPergro.log
-# Now extract out the RMS differences of TSOI for both
-# You may want to extract out the RMS differences for TSA as well
-# Changes in urban or lake land-units won't be detected with TSOI
-> grep "RMS TSOI" trustedPergro.log | awk '{print $3}' > RMStrusted.dat
-> grep "RMS TSOI" newPergro.log | awk '{print $3}' > RMSnewmachine.dat
-# And plot the two curves up to your screen
-> env TYPE=x11 RMSDAT=RMSnewmachine.dat RMSDAT2=RMStrusted.dat ncl \
-../../models/lnd/clm/tools/ncl_scripts/pergroPlot.ncl
-
-Here is a sample plot for several trusted machines: bluefire, intrepid, jaguar,
-and edinburgh (with both the lahey and intel compilers).
-The green line is the error growth for bluefire, the red is the error growth
-for intrepid, the dashed navy is for jaguar, the dashed maroon is for the intel
-compiler on edinburgh, and the thick dashed goldenrod line is for edinburgh with the
-lahey compiler. Note, the data for this plot is in
-models/lnd/clm/tools/ncl_scripts; the files are named
-according to the legend. Note that the lines tend to cluster together and follow
-quite closely to the bluefire line, which is our main trusted machine.
-Sample Good Perturbation Error Growth Curves (within roundoff)
-[figure: error growth curves for the trusted machines listed above; graphic omitted from this rendering]
-
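-
-The same extraction works for any field that cprnc reports. For example, to also
-check 'TSA' (which, unlike 'TSOI', will pick up changes in urban and lake
-land-units), the commands below simply mirror the TSOI commands above; the
-RMS*TSA.dat file names are just illustrative:
-
-> grep "RMS TSA" trustedPergro.log | awk '{print $3}' > RMStrustedTSA.dat
-> grep "RMS TSA" newPergro.log | awk '{print $3}' > RMSnewmachineTSA.dat
-> env TYPE=x11 RMSDAT=RMSnewmachineTSA.dat RMSDAT2=RMStrustedTSA.dat ncl \
-../../models/lnd/clm/tools/ncl_scripts/pergroPlot.ncl
-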
-
-
-When you do NOT have access to a trusted machine you can use the trusted file from
-bluefire that is available on the inputdata repository.
-
-# Build the cprnc program
-> cd models/lnd/clm/tools/cprnc
-> gmake
-# Get the unperturbed file from the subversion repository
-> cd ../../../../../scripts/newMachinePergro0
-> set dir = "lnd/clm2/pergrodata"
-> set file = bluefirePergro0.ICN.0001-01-01_1.9x2.5_gx1v6_simyr2000_clm4-cesm1_0_3.c110617.nc
-> echo "trustedfile = DIN_LOC_ROOT/$dir/$file" > clm.input_data_list
-> ../ccsm_utils/Tools/check_input_data -datalistdir . -export -inputdata $DIN_LOC_ROOT
-# And now run cprnc on the bluefire file and the new machine both without perturbation
-> ../../models/lnd/clm/tools/cprnc/cprnc $DIN_LOC_ROOT/$dir/$file \
-../newMachinePergro0/newMachinePergro0.clm2.h0.001-01-01.00000.nc > newPergro.log
-# Now extract out the RMS difference
-# You may want to extract out the RMS differences for TSA as well
-# Changes in urban or lake land-units won't be detected with TSOI
-> grep "RMS TSOI" newPergro.log | awk '{print $3}' > RMSnewmachine.dat
-# And plot the new curve versus the trusted curve up to your screen
-> env TYPE=x11 RMSDAT=RMSnewmachine.dat \
-RMSDAT2=../../models/lnd/clm/tools/ncl_scripts/RMSbluefire.dat ncl \
-../../models/lnd/clm/tools/ncl_scripts/pergroPlot.ncl
-
-In the figure below we now show examples of curves for changes that are larger than
-roundoff. Once again the green curve is the trusted error growth from bluefire. The
-other curves are for changes that may be fairly small, but are larger than roundoff. The
-goldenrod curve is for using the 1850 Nitrogen deposition file, and the navy is for using
-the 1999 file, rather than the one for year 2000. The red is for using the 1850 aerosol
-dataset rather than 2000, and the maroon is for adding the snow combination bug in. The
-difference for changes that are greater than roundoff is that the curves climb very
-steeply to the 10^-6 value and then level off, while the
-curve for bluefire climbs much more slowly and gradually. The curves also don't mimic
-each other in any way, like the trusted machine plots do.
-Sample Bad Perturbation Error Growth Curves (changes greater than roundoff)
-[figure: error growth curves for the dataset changes described above; graphic omitted from this rendering]
-
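-
-Along with the plots, it can help to look at the summary that cprnc prints at the
-end of each log -- the count of fields that differ versus those that are identical
-(see the tools chapter for details on cprnc output). The tail command here is just
-one convenient way to see it:
-
-> tail -20 newPergro.log
-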
- - -Running stand-alone &clm; with transient historical &CO2; -concentration - -In this case you want to run a simulation with stand-alone &clm; responding -to changes in &CO2; for a historical period. -For this example, we will start with the "I_1850-2000_CN" compset that -has transient: land-use, Nitrogen and Aerosol deposition already. You could -also use another compset if you didn't want these other features to be transient. -In order to get &CO2; to be transient we need to edit the -&datm; template so that we add an extra streams file to describe how -&CO2; varies over the historical period. You also need -a &netcdf; datafile that datm can read that gives the variation. You could -supply your own file, but we have a standard file that is used by CAM for this -and our example will make use of this file. - - - -Most everything here has to do with changing datm rather than &clm; -to allow this to happen. As such the user that wishes to do this should -first become more familiar with datm and read the -&cesm; Data -Model User's Guide especially as it pertains to the datm. Note, also -that in this example we show how to edit the datm "buildnml" file for your -case, but you could do something similar by editing the datm template. - - - - -This section documents the process for doing something that is non-standard. -There may be errors with the documentation and process, and you may have to do -some work before all of this works for you. If that is the case, we recommend -that you do further research into understanding the process and the files, as -well as understanding the datm and how it works. You may have to read documentation -found in the code for datm as well as "csm_share". - - - -The datm has "streams" files that have rough XML-like syntax and specify the -location and file to get data from, as well as information on the variable names -and the data locations of the grid points. The datm expects specific variable names -and the datm "maps" the expected variable names from the file to the names expected -by datm. The file we are working with here is a file with a single-point, that covers -the entire globe (so the vertices go from -90 to 90 degrees in latitude and 0 to 360 -degrees in longitude). Since it's a single point it's a little easier to work with -than datasets that may be at a given horizontal resolution. The datm also expects -that variables will be in certain units, and only expects a limited number of -variables so arbitrary fields can NOT be exchanged this way. However, the process -would be similar for datasets that do contain more than one point. - - -The three things that are needed: a domain file, a data file, and a streams text file. -The domain file is a CF-compliant &netcdf; file that has information -on the grid points (latitudes and longitudes for cell-centers and vertices, mask -, fraction, and areas). The datafile is a CF-compliant &netcdf; file with the data that -will be mapped. The streams text file is the XML-like file that tells datm how to find -the files and how to map the variables datm knows about to the variable names on the -&netcdf; files. Note, that in our case the domain file and the data file are the same -file. In other cases, the domain file may be separate from the data file. - - -First we are going to create a case, and we will edit -the Buildconf/datm.buildnml.csh so that we add a -&CO2; data stream in. 
There is a streams text file
-available in models/lnd/clm/doc/UsersGuide/co2_streams.txt,
-that includes a file with a &CO2; time-series from 1765 to 2007.
-
-Example Transient Simulation with Historical &CO2;
-
-> cd scripts
-> ./create_newcase -case DATM_CO2_TSERIES -res f19_g16 -compset I_1850-2000_CN \
--mach bluefire
-> cd DATM_CO2_TSERIES
-# Set CCSM_BGC to CO2A so that CO2 will be passed from atmosphere to land
-> ./xmlchange -file env_conf.xml -id CCSM_BGC -val CO2A
-# Set CLM_CO2_TYPE to diagnostic so that the land will use the value sent from the atmosphere
-> ./xmlchange -file env_conf.xml -id CLM_CO2_TYPE -val diagnostic
-> ./configure -case
-> cd Buildconf
-# Copy the sample streams file over
-> cp ../../../models/lnd/clm/doc/UsersGuide/co2_streams.txt .
-
-The first thing we will do is to edit the datm buildnml script to add
-a CO2 file stream in. To do this we will apply a patch with the differences
-needed. The patch file addco2_datm.buildnml.diff is
-in models/lnd/clm/doc/UsersGuide and looks like this...
-
-&co2streams_diff;
-
-So to apply the patch you do this...
-
-> cd scripts/DATM_CO2_TSERIES/Buildconf
-> patch < ../../../models/lnd/clm/doc/UsersGuide/addco2_datm.buildnml.diff
-
-Once you've done that you can build and run your case normally.
-
-The patch assumes you are using an I_1850-2000_CN compset out of the box, with
-DATM_PRESAERO equal to trans_1850-2000. So it assumes standard
-Qian atmosphere forcing, and transient prescribed aerosols from streams files. If your case changes
-anything here the patch will fail, and you will need to put the changes in by hand.
-
-If the patch fails, you will have to add the changes to the
-datm.buildnml.csh found in the above
-patch file by hand. Basically, it adds an extra streams file for &CO2; to the end of the streams variable,
-and other arrays associated with streams (adding mapalgo as a new array with bilinear for everything, but
-the &CO2; file which should be "nn" for nearest neighbor).
-
-The streams file above is hard-coded for the path of the file on &ncar; computers. To use it on an outside
-machine you'll need to edit the filepath in the streams file to point to the location where you have the file.
-
-After going through these steps, you will have a case where you have datm reading
-in an extra streams text file that points to a data file with &CO2;
-data on it that will send that data to the &clm;.
-
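-
-For orientation, a &CO2; stream entry has the same overall layout as the streams
-file shown earlier for the single-point forcing: a domainInfo block plus a fieldInfo
-block. The fragment below is only an illustrative sketch of the fieldInfo portion;
-the CO2-to-co2diag variable mapping and the path and file names are assumptions,
-not a copy of co2_streams.txt, so consult the actual file in
-models/lnd/clm/doc/UsersGuide for the real values.
-
- <fieldInfo>
-   <variableNames>
-    CO2   co2diag
-   </variableNames>
-   <filePath>
-    $DIN_LOC_ROOT/atm/datm7/CO2
-   </filePath>
-   <fileNames>
-    co2_time_series_1765-2007.nc
-   </fileNames>
- </fieldInfo>
-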
-
diff --git a/doc/UsersGuide/stylesheethtml2docbook.xsl b/doc/UsersGuide/stylesheethtml2docbook.xsl
deleted file mode 100644
index 13c30ff71d..0000000000
--- a/doc/UsersGuide/stylesheethtml2docbook.xsl
+++ /dev/null
@@ -1,579 +0,0 @@
-[579 lines of XSLT removed with this file: templates converting the XHTML tool
-output (headings, paragraphs, images, links, and tables, including column-counting
-helpers with the messages "Attempting to count columns on a non-table element" and
-"Row parameter is not a valid row") into DocBook. The XSL markup was stripped in
-this rendering and is not reproduced here.]
- diff --git a/doc/UsersGuide/tools.xml b/doc/UsersGuide/tools.xml deleted file mode 100644 index f5fd8beac4..0000000000 --- a/doc/UsersGuide/tools.xml +++ /dev/null @@ -1,1396 +0,0 @@ - - -Using the &clm; tools to create your own input datasets - -There are several tools provided with &clm; that allow you to create your own input -datasets at resolutions you choose, or to interpolate initial conditions to a different -resolution, or used to compare &clm; history files between different cases. The tools are -all available in the models/lnd/clm/tools directory. Most of the tools -are &FORTRAN; stand-alone programs in their own directory, but there is also a suite of -&ncl; -scripts in the ncl_scripts directory. Some of the &ncl; scripts are -very specialized and not meant for general use, and we won't document them here. They -still contain documentation in the script itself and the README file in the tools -directory. But, the list of generally important scripts and programs are: - - - cprnc to compare &netcdf; files with a time axis. - - - interpinic to interpolate initial condition files. - - - mkgriddata to create grid datasets. - - - mkdatadomain to create domain files from grid datasets -used by &datm; or docn. - - - mksurfdata to create surface datasets from grid datasets. - - - ncl_scripts/getregional_datasets.pl script to extract a -region or a single-point from global input datasets. See the single-point chapter -for more information on this. - - - ncl_scripts/npdepregrid.ncl interpolate the Nitrogen -deposition datasets to a new resolution. - - - ncl_scripts/aerdepregrid.ncl interpolate the Aerosol -deposition datasets to a new resolution. - - - - - -In the sections to come we will go into detailed description of how to use each of -these tools in turn. First, however we will discuss the common environment variables -and options that are used by all of the &FORTRAN; tools. Second, we go over the outline -of the entire file creation process for all input files needed by &clm; for a new -resolution, then we turn to each tool. In the last section we will -discuss how to customize files for particular observational sites. - - - -Common environment variables and options used in building the &FORTRAN; -tools - -The &FORTRAN; tools all have similar makefiles, and similar options for building. -All of the Makefiles use GNU Make extensions and thus require that you use GNU make -to use them. They also auto detect the type of platform you are on, using "uname -s" -and set the compiler, compiler flags and such accordingly. There are also environment -variables that can be set to set things that must be customized. All the tools use -&netcdf; and hence require the path to the &netcdf; libraries and include files. -On some platforms (such as Linux) multiple compilers can be used, and hence there -are env variables that can be set to change the &FORTRAN; and/or "C" compilers used. -The tools other than cprnc also allow finer control, by also -allowing the user to add compiler flags they choose, for both &FORTRAN; and "C", as -well as picking the compiler, linker and and add linker options. Finally the tools -other than cprnc allow you to turn -optimization on (which is off by default but on for the mksurfdata and -interpinic -programs) with the OPT flag so that the -tool will run faster. To get even faster performance, the interpinic, -mksurfdata, and -mkgriddata programs allow you to also use the SMP to -turn on multiple shared memory processors. 
-When SMP=TRUE you set the number of threads used by the program with -the OMP_NUM_THREADS environment variable. - - -Options used by all: cprnc, interpinic, -mkdatadomain, mkgriddata, and -mksurfdata - -LIB_NETCDF -- sets the location of the &netcdf; library. -INC_NETCDF -- sets the location of the &netcdf; include files. -USER_FC -- sets the name of the &FORTRAN; compiler. - -Options used by: interpinic, mkdatadomain, -mkgriddata, and mksurfdata - -MOD_NETCDF -- sets the location of the &netcdf; &FORTRAN; module. -USER_LINKER -- sets the name of the linker to use. -USER_CPPDEFS -- adds any CPP defines to use. -USER_CFLAGS -- add any "C" compiler flags to use. -USER_FFLAGS -- add any &FORTRAN; compiler flags to use. -USER_LDFLAGS -- add any linker flags to use. -USER_CC -- sets the name of the "C" compiler to use. -OPT -- set to TRUE to compile the code optimized (TRUE or FALSE) - -Options used by: interpinic, mkgriddata, and mksurfdata: - -SMP -- set to TRUE to turn on shared memory parallelism (i.e. -&omp;) (TRUE or FALSE) -Filepath -- list of directories to build source code from. -Srcfiles -- list of source code filenames to build executable from. - -Options used only by cprnc: - -EXEDIR -- sets the location where the executable will be built. -VPATH -- colon delimited path list to find the source files. - -More details on each environment variable. - - -LIB_NETCDF - -This variable sets the path to the &netcdf; library file -(libnetcdf.a). If not -set it defaults to /usr/local/lib. In order to use the tools -you need to build the &netcdf; library and be able to link to it. In order to build -the model with a particular compiler you may have to compile the &netcdf; library with -the same compiler (or at least a compatible one). - - - - - -INC_NETCDF - -This variable sets the path to the &netcdf; include directory (in order to find -the include file netcdf.inc). -if not set it defaults to /usr/local/include. - - - - - -MOD_NETCDF - -This variable sets the path to the &netcdf; module directory (in order to find -the &netcdf; &FORTRAN90; module file when &netcdf; is used with a &FORTRAN90; -use statement. When not set it defaults to the -LIB_NETCDF value. - - - - - -USER_FC - -This variable sets the command name to the &FORTRAN90; compiler to use when -compiling the tool. The default compiler to use depends on the platform. And -for example, on the AIX platform this variable is NOT used - - - - - -USER_LINKER - -This variable sets the command name to the linker to use when linking the object -files from the compiler together to build the executable. By default this is set to -the value of the &FORTRAN90; compiler used to compile the source code. - - - - - -USER_CPPDEFS - -This variable adds additional optional values to define for the C preprocessor. -Normally, there is no reason to do this as there are very few CPP tokens in the CLM -tools. However, if you modify the tools there may be a reason to define new CPP -tokens. - - - - - -USER_CC - -This variable sets the command name to the "C" compiler to use when -compiling the tool. The default compiler to use depends on the platform. And -for example, on the AIX platform this variable is NOT used - - - - - -USER_CFLAGS - -This variable adds additional compiler options for the "C" compiler to use -when compiling the tool. By default the compiler options are picked according -to the platform and compiler that will be used. 
- - - - - -USER_FFLAGS - -This variable adds additional compiler options for the &FORTRAN90; compiler to use -when compiling the tool. By default the compiler options are picked according -to the platform and compiler that will be used. - - - - - -USER_LDFLAGS - -This variable adds additional options to the linker that will be used when linking -the object files into the executable. By default the linker options are picked according -to the platform and compiler that is used. - - - - - -SMP - -This variable flags if shared memory parallelism (using i&omp;) should be used when -compiling the tool. It can be set to either TRUE or -FALSE, by default it is set to FALSE, so -shared memory parallelism is NOT used. When set to TRUE you can -set the number of threads by using the OMP_NUM_THREADS environment -variable. Normally, the most you would set this to would be to the number of on-node -CPU processors. Turning this on should make the tool run much faster. - - - -Note, that depending on the compiler answers may be different when SMP -is activated. - - - - - - -OPT - -This variable flags if compiler optimization should be used when -compiling the tool. It can be set to either TRUE or -FALSE, by default it is set to FALSE for -mkdatadomain and TRUE for -mksurfdata and interpinic. -Turning this on should make the tool run much faster. - - - -Note, you should expect that answers will be different when OPT -is activated. - - - - - - -Filepath - -All of the tools are stand-alone and don't need any outside code to operate. The -Filepath is the list of directories needed to compile -and hence is always simply "." the current directory. Several tools use -copies of code outside their directory that is in the &cesm; -distribution (either csm_share code or &clm; source code). - - - - - -Srcfiles - -The Srcfiles lists the filenames of the source code to use -when building the tool. - - - - - -EXEDIR - -The cprnc tool uses this variable to set the location of where the executable -will be built. The default is the current directory. - - - - - -VPATH - -The cprnc tool uses this variable to set the colon delimited pathnames of where -the source code exists. The default is the current directory. - - - - - - - - - -There are several files that are copies of the original files from either -models/lnd/clm/src/main, -models/csm_share/shr, or copies from other tool -directories. By having copies the tools can all be made stand-alone, but -any changes to the originals will have to be put into the tool directories -as well. - - - -The README.filecopies (which can be found in -models/lnd/clm/tools) is repeated here. - -&filecopies; - - - - - -General information on running the &FORTRAN; tools - -The tools run either one of two ways, with a namelist to provide options, or -with command line arguments (and NOT both). interpinic and -cprnc run with command line arguments, and the other tools -run with namelists. - - -Running &FORTRAN; tools with namelists - -mkgridata, mksurfdata and -mkdatadomain run with namelists that are read from -standard input. Hence, you create a namelist and then run them by -redirecting the namelist file into standard input as follows: - -./program < namelist - -For programs with namelists there is at least one sample namelist with the -name "program".namelist (i.e. mksurfdata.namelist -for the mksurfdata program). There may also be other sample -namelists that end in a different name besides "namelist". Namelists that you create -should be similar to the example namelist. 
The namelist values are also documented -along with the other namelists in the: - -models/lnd/clm/bld/namelist_files/namelist_definition.xml -file and default values in the: - -models/lnd/clm/bld/namelist_files/namelist_defaults_clm_tools.xml -file. - - - -Running &FORTRAN; tools with command line options - -interpinic and cprnc run with command line -arguments. The detailed sections below will give you more information on the command -line arguments specific to each tool. Also running the tool without any arguments -will give you a general synopsis on how to run the tool. For example to get help -on running interpinic do the following. - -cd models/lnd/clm/tools/interpinic -gmake -./interpinic - - - - -Running &FORTRAN; tools built with SMP=TRUE - -When you enable SMP=TRUE on your build of one of the tools that -make use of it, you are using &omp; for shared memory parallelism (SMP). In -SMP loops are run in parallel with different threads run on different processors -all of which access the same memory (called on-node). Thus you can only usefully -run up to the number of processors that are available on a single-node of the machine -you are running on. For example, on the &ncar; machine bluefire there are 32 processors -per node, but the SMT hardware on the machine allows you to submit twice as many -threads or 64 threads. So to run the mksurfdata on bluefire -optimized, with 64 threads you would do the following: - -cd models/lnd/clm/tools/mksurfdata -gmake OPT=TRUE SMP=TRUE -setenv OMP_NUM_THREADS 64 -./mksurfdata < mksurfdata.namelist - - - - - - -The File Creation Process - - -When just creating a replacement file for an existing one, the relevant tool should -be used directly to create the file. When you are creating a set of files for a new -resolution there are some dependencies between the tools that you need to keep in mind -when creating them. The main dependency is that the mkgriddata MUST -be done first as the grid dataset is then input into the other tools. Also look at -. - - - -Creating a complete set of files for input to &clm; - -Create grid and fraction datasets - -First use mkgriddata to create grid and fraction datasets. -See for more information on this. - - - - -Create domain dataset (if NOT already done) - -Next use mkdatadomain to create a domain file for use by -&datm; from the grid and fraction datasets just created. This is required, unless -a domain file already created was input into mkgriddata on -the previous step. -See for more information on this. - - - - -Create surface datasets - -Next use mksurfdata to create a surface dataset, using the grid -dataset as input. -See for more information on this. - - - - -Interpolate aerosol deposition datasets (optional) - -By default the atmosphere model will interpolate -these datasets on the fly, so you don't normally need to do this step. -A reason you might want to do this is to make the read and interpolation faster, -by reducing the amount of data read in and removing the need for the interpolation. -So, if you do, you can use aerdepregrid.ncl to regrid aerosol -deposition datasets to your new resolution using the grid dataset as input. -See for more information on this. - - - - -Interpolate Nitrogen deposition datasets (optional, but only needed if running &clmcn;) - -By default Nitrogen deposition is read in from stream -files at 2-degree resolution and interpolated to the resolution you are running at, -so you don't need to do this step. 
As with aerosol deposition datasets a reason -you might want to do this is to make the read and interpolation faster, -by reducing the amount of data read in and removing the need for the interpolation. -So, if you do you can use ndepregrid.ncl -to regrid Nitrogen deposition datasets to your new resolution using the grid dataset -as input. -See for more information on this. - - - - -Create some sort of initial condition dataset - - -You then need to do one of the following three options to have an initial dataset -to start from. - - - - - -Use spinup-procedures to create initial condition datasets - -The first option is to do the spinup procedures from arbitrary initial conditions -to get good initial datasets. This is the most robust method to use. -See , , or - for more information on this. - - - - -Use <command>interpinic</command> to interpolate existing initial -condition datasets - -The next option is to interpolate from spunup datasets at a different resolution, using -interpinic. -See for more information on this. - - - - -Start up from arbitrary initial conditions - -The last alternative is to run from arbitrary initial conditions without using any -spun-up datasets. This is inappropriate when using &clmcn; (bgc=cn or cndv) as it -takes a long time to spinup Carbon pools. - - -This is NOT recommended as many fields in &clm; take a long time to equilibrate. - - - - - - - - - - -Enter the new datasets into the &buildnml; XML database - -The last optional thing to do is to enter the new datasets into the &buildnml; -XML database. See for more information on -doing this. This is optional because the user may enter these files into their -namelists manually. The advantage of entering them into the database is so that -they automatically come up when you create new cases. - - - - - - - - -Using the <command>cprnc</command> tool to compare two history files - -cprnc is a tool shared by both CAM and &clm; to compare two -&netcdf; history files. -It differences every field that has a time-axis that is also shared on both files, -and reports a summary of the difference. The summary includes the three largest -differences, as well as the root mean square (RMS) difference. It also gives some -summary information on the field as well. You have to enter at least one file, and up to -two files. With one file it gives you summary information on the file, and with two it -gives you information on the differences between the two. At the end it will give you a -summary of the fields compared and how many fields were different and how many were -identical. - - -Options: - --m = do NOT align time-stamps before comparing --v = verbose output --ipr --jpr --kpr - -See the cprnc -README file for more details which is -repeated here: - -&cprnc_readme; - - - -To compare files with OUT a time axis you can use the cprnc.ncl -&ncl; script in models/lnd/clm/tools/ncl_scripts. It won't give -you the details on the differences but will report if the files are identical or -different. - - - - - - -Using <command>interpinic</command> to interpolate initial conditions to different -resolutions - -"interpinic" is used to interpolate initial conditions from one resolution to another. -In order to do the interpolation you must first run &clm; to create a restart file to -use as the "template" to interpolate into. Running from arbitrary initial conditions -(i.e. finidat = ' ') for a single time-step is sufficient to do this. Make sure the -model produces a restart file. 
You also need to make sure that you setup the same -configuration that you want to run the model with, when you create the template file. - - -Command line options to interpinic: - --i = Input filename to interpolate from --o = Output interpolated file, and starting template file - - - -There is a sample template file in the models/lnd/clm/tools/interpinic -directory and can be used to run interpolate to. -However, this file was created with an older version of &clm; and hence -we actually recommend that you would do a short run with &clm; to create a template file -to use. - - - - -Example of running &clm; to create a template file for -<command>interpinic</command> to interpolate to - -> cd scripts -> ./create_newcase -case cr_f10_TmpltI1850CN -res f10_f10 -compset I1850CN \ --mach bluefire -> cd cr_f10_TmpltI1850CN -# Set starting date to end of year -> ./xmlchange -file env_conf.xml -id RUN_STARTDATE -val 1948-12-31 -# Set year align to starting year -> ./xmlchange -file env_conf.xml -id DATM_CLMNCEP_YR_ALIGN -val 1948 -# Set to run a cold start -> ./xmlchange -file env_conf.xml -id CLM_FORCE_COLDSTART -val on -# Set to run only a single day, so a restart file will be created on Jan/1/1949 -> ./xmlchange -file env_run.xml -id STOP_N -val 1 -# Then configure, build and run as normal -> ./configure -case -> ./cr_f10_TmpltI1850CN.bluefire.build -> ./cr_f10_TmpltI1850CN.bluefire.submit -# And copy the resulting restart file to your interpinic directory -> cd ../models/lnd/clm/tools/interpinic -> cp /ptmp/$LOGIN/cr_f10_TmpltI1850CN/run/cr_f10_TmpltI1850CN.clm2.r.1949-01-01-00000.nc . - - - - -In the next example we build interpinic optimized with shared -memory on for 64 threads so that it runs as fast as possible, to interpolate one of -the standard 1-degree datasets to the above 10x15 template file that we created. - - -Example of building and running <command>interpinic</command> to -interpolate a 1-degree <filename>finidat</filename> dataset to 10x15 - -> cd models/lnd/clm/tools/interpinic -> gmake OPT=TRUE SMP=TRUE -> env OMP_NUM_THREADS=64 ./interpinic -o cr_f10_TmpltI1850CN.clm2.r.1949-01-01-00000.nc / --i /fs/cgd/csm/inputdata/ccsm4_init/b40.1850.track1.1deg.006/0863-01-01/b40.1850.track1.1deg.006.clm2.r.0863-01-01-00000.nc - - - - -Running interpinic at high resolution can take a long time, so we -recommend that you always build it optimized and with shared memory processing on, to -cut down the run time as much as possible. - - - - -interpinic does NOT work for CNDV (bgc=cndv). - - - - - - -In we give a simpler way to run -interpinic for several standard resolutions at once, with a script -to loop over several resolutions. This is useful for &clm; developers who need to -create many finidat files at once. - - - - - -Using <command>mkgriddata</command> to create grid datasets - -mkgriddata is used to create grid, fraction, and topography -datasets to run &clm; at a new resolution. It is typically the first step in creating -datasets needed to run &clm; at a new resolution (followed by -mksurfdata, and -then the interpolation programs, aerdepregrid.ncl, and -ndepregrid.ncl when running with CN). - - - -mkgriddata namelist - -mkgriddata is controlled by a namelist. There are ten different -namelist items, and you need to use enough of them so that files will be output. -The different types of input datasets contain different input data types, that -correspond to the three different types of output files: grid, fraction, and topography. 
-Output files for each of these will only be output if there is input data that -correspond to these. If you only have input data for grid locations -- you will only -get an output grid file. If you have both grid and fraction data you will get grid and -fraction data files. If you also have topography data you will also get topo files. - - -Namelist options to mkgriddata include: - -mksrf_fnavyoro -- Navy orography file to use for land fraction -and surface heights. -mksrf_frawtopo -- Raw topography file with just surface -heights. -mksrf_fcamfile -- CAM initial conditions file with -land-fractions and topography -mksrf_fclmgrid -- &clm; grid file -mksrf_fccsmdom -- &cesm; domain file -mksrf_fcamtopo -- CAM topography file -mksrf_lsmlon -- number of longitude for regional grid -mksrf_lsmlatnumber of latitudes for regional grid -mksrf_edgen -- Northern edge for regional grid -mksrf_edgee -- Southern edge for regional grid -mksrf_edges -- Eastern edge for regional grid -mksrf_edgew -- Western edge for regional grid - - - -You need to enter one of the following four options: - - mksrf_fnavyoro - high resolution topo dataset (topo data) - mksrf_lsmlon - number of longitudes - mksrf_lsmlat - number of latitudes - mksrf_edgen - northern edge of grid (degrees) - mksrf_edgee - eastern edge of grid (degrees) - mksrf_edges - southern edge of grid (degrees) - mksrf_edgew - western edge of grid (degrees) - -or - - mksrf_fcamfile - CAM topo file (grid and possibly fraction data) - -or - - mksrf_fccsmdom - &cesm; domain file (both grid, and fraction data) - -or - - mksrf_fclmgrid - &clm; grid or surface dataset file (grid data) - -Note, you can provide more than one of the needed datasets, and the output -data will be determined by the datasets according to an order of precedence. -The order of precedence for data is as follows: - -mksrf_fcamfile -mksrf_fclmgrid -mksrf_fnavyoro -mksrf_fccsmdom - -Grid data then will be established by the file with the highest precedence. -&cesm; domain files sometimes have latitudes and longitudes that are "off" from -the standard by a small amount. By establishing an order of precedence you can ensure -that grid locations exactly match a given standard file, even if the values in the domain -file are off from that. - - - -There are three different major modes for using "mkgriddata" to create grid files -for &clm;: - -mksrf_fnavyoro -- Navy orography file to use for land fraction -and surface heights. -mksrf_frawtopo -- Raw topography file with just surface -heights. 
-There are three different major modes for using mkgriddata to
-create grid files for &clm;:
-
-Convert &cesm; domain files to &clm; grid files
-Create single point or regional area grid files
-Convert CAM files to &clm; grid files
-
-
-
-
-
-Convert &cesm; domain files to &clm; grid files
-
-&cesm; domain files, such as those used for &datm;, include all the information
-needed to create &clm; grid and fraction files.
-
-Example <command>mkgriddata</command> namelist to convert &cesm; 4x5 domain files to &clm; grid files
-
-&clmexp
- mksrf_fccsmdom=
-'/fs/cgd/csm/inputdata/lnd/dlnd7/domain.lnd.4x5_gx3v5.060404.nc'
- mksrf_fclmgrid=
-'/fs/cgd/csm/inputdata/lnd/clm2/griddata/griddata_4x5_060404.nc'
-/
-
-
-
-
-Notice that in the above example, a &clm; grid file is included as well, even though
-it's not required. The reason for this is to ensure that the latitudes and longitudes
-on the output files exactly match a standard grid file.
-
-
-
-
-
-
-Create single point or regional area grid files
-
-The process to create single-point or regional area &clm; grid files is the same.
-You enter the number of latitudes and longitudes you want on your output file and
-the extent of the grid: North, East, South and West. You tell
-mkgriddata that
-you are entering a "regional" grid, and you also enter the standard Navy orography
-dataset (or your own orography file if desired). For a single point you simply
-enter "1" for the number of latitudes and longitudes, but you still enter the
-grid extent (of the single grid cell).
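-For example, a single-point namelist might look like the following (a minimal
-sketch modeled on the regional example below; the one-degree grid-cell extent over a
-hypothetical site is purely illustrative, so substitute the edges of your own cell):
-
-&clmexp
- mksrf_fnavyoro=
-"/fs/cgd/csm/inputdata/lnd/clm2/rawdata/mksrf_navyoro_20min.c010129.nc"
- mksrf_lsmlon = 1
- mksrf_lsmlat = 1
- mksrf_edgee = 300.
- mksrf_edgew = 299.
- mksrf_edges = -10.
- mksrf_edgen = -9.
-/
-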
Here is a sample regional namelist to create
-a 5x5 regional grid over the Amazon:
-
-Example <command>mkgriddata</command> namelist to create regional grid over Amazon
-
-&clmexp
- mksrf_fnavyoro=
-"/fs/cgd/csm/inputdata/lnd/clm2/rawdata/mksrf_navyoro_20min.c010129.nc"
- mksrf_lsmlon = 5
- mksrf_lsmlat = 5
- mksrf_edgee = 303.75
- mksrf_edgew = 286.25
- mksrf_edges = -15.
- mksrf_edgen = -4.
-/
-
-
-
-
-
-Currently you can NOT have regional grids that straddle both
-sides of the Greenwich (longitude = zero) line.
-
-
-
-
-You should enter longitudes with values from 0 to 360 East.
-
-
-
-
-
-Convert <acronym>CAM</acronym> files to &clm; grid files (deprecated)
-
-Older CAM initial files included all the information needed to create &clm;
-grid files. Newer CAM files no longer include land fraction data. Hence you
-can use CAM files to give you the grid coordinates, but you need other data
-to give you the land-mask and topography. Since CAM files no longer
-contain the needed information, this option is now deprecated. In most cases you should
-use one of the other two options.
-
-
-
-
-
-
-
-Using <command>mkdatadomain</command> to create domain datasets for &datm; or docn from &clm; grid datasets
-
-"mkdatadomain" is used to convert &clm; grid and fraction datasets into domain datasets
-that can be used by either the "datm" or "docn" models. Most often &clm; users will want
-to convert the grid datasets they just created using mkgriddata into
-domain datasets to be used by &datm; for an "I" case. mkdatadomain is
-controlled by a namelist, and has a very straightforward operation with only four
-namelist items, all of which are required. You specify which output mode you want, "datm"
-or "docn", then set the input &clm; grid and frac datasets, and the output domain file.
-
-
-Example <command>mkdatadomain</command> namelist to create a domain file from
-&clm; frac and grid data files
-
-&domain_nl
- dtype = "datm"
- f_fracdata =
-'/fs/cgd/csm/inputdata/lnd/clm2/griddata/fracdata_4x5_USGS_070110.nc'
- f_griddata =
-'/fs/cgd/csm/inputdata/lnd/clm2/griddata/griddata_4x5_060404.nc'
- f_domain =
-'domain.lnd.fv4x5_USGS.090117.nc'
-/
-
-
-
-
-
-Using mksurfdata to create surface datasets from grid datasets
-
-mksurfdata creates surface datasets from grid datasets and raw datafiles
-at half-degree resolution, producing files that describe the surface characteristics
-needed by &clm; (fraction of grid cell covered by different land-unit types, fraction
-for different vegetation types, and things like soil color and soil texture).
-To run mksurfdata you can either use the
-mksurfdata.pl script, which will create namelists for you using the &buildnml;
-XML database, or you can run it by hand using a namelist that you provide (possibly
-modeled after an example provided in the
-models/lnd/clm/tools/mksurfdata directory). The namelist for
-mksurfdata is sufficiently complex that we recommend using the
-mksurfdata.pl tool to build it. In the next section
-we describe how to use the mksurfdata.pl script, and the following
-section gives more details on running mksurfdata by hand and the
-various namelist input variables to it.
-
-
-Running <command>mksurfdata.pl</command>
-
-The script mksurfdata.pl can be used to run the
-mksurfdata program for several configurations, resolutions,
-simulation-years and simulation year ranges.
It will create the needed namelists for
-you and move the files
-over to your inputdata directory location (and create a list of the files created;
-for developers this file is also a script to import the files into the svn inputdata
-repository). It will also use the &buildnml; XML database
-to determine the correct input files to use, and for transient cases it will create
-the appropriate mksrf_fdynuse file with the list of files for each
-year needed for this case. And in the case of urban single-point
-datasets (where surface datasets are actually input into mksurfdata)
-it will do the additional processing required so that the output dataset
-can be used once again by mksurfdata. Because it figures out
-namelist and input files for you, it is recommended that you use this script for creation
-of standard surface datasets. If you need to create surface datasets for customized
-cases, you might need to run mksurfdata on its own. But you
-could use mksurfdata.pl with the "-debug" option to give you
-a namelist to start from.
-For help on mksurfdata.pl you can use the "-help" option as below:
-
-> cd models/lnd/clm/tools/mksurfdata
-> mksurfdata.pl -help
-
-The output of the above command is:
-
-&mksurfdatapl;
-
-
-
-To run the script with optimized mksurfdata for a 4x5 degree grid
-for 1850 conditions, on bluefire you would do the following:
-
-Example of running <command>mksurfdata.pl</command> to create a 4x5 resolution
-<filename>fsurdat</filename> for a 1850 simulation year
-
-> cd models/lnd/clm/tools/mksurfdata
-> gmake
-> mksurfdata.pl -y 1850 -r 4x5
-
-
-
-
-
-
-Running <command>mksurfdata</command> by Hand
-
-In the above section we show how to run mksurfdata through
-the mksurfdata.pl script using input datasets that are in the &buildnml;
-XML database. When you are running with input datasets that are NOT available in
-the XML database you either need to add them to the database (as outlined in a later
-chapter), or you need to run mksurfdata
-by hand, as we will outline here.
-
-
-
-Preparing your <command>mksurfdata</command> namelist
-
-When running mksurfdata by hand you will need to prepare your
-own input namelist. There are sample namelists that are set up for running on the
-&ncar; machine bluefire. You will need to change the filepaths to
-run on a different machine. The list of sample namelists includes:
-
-mksurfdata.namelist -- standard sample namelist.
-mksurfdata.regional -- sample namelist to
-build for a regional grid dataset (5x5_amazon)
-mksurfdata.singlept -- sample namelist to
-build for a single point grid dataset (1x1_brazil)
-
-Note that one of the inputs, mksrf_fdynuse, is a filename that
-includes the filepaths to other files. The filepaths in this file will have to
-be changed as well. You also need to make sure that the line lengths remain the same,
-as the read is a formatted read, so the placement of the year in the file must remain
-the same even with the new filenames. One advantage of the mksurfdata.pl
-script is that it will create the mksrf_fdynuse file for you.
-
-
-We list the namelist items below. Most of the namelist items are filepaths to
-the input half-degree resolution datasets that will be scaled to the
-resolution of your grid dataset.
-You must first specify the input grid dataset for the output resolution:
-
-mksrf_fgrid Grid dataset
-
-Then you must specify settings for the input high resolution datafiles:
-
-mksrf_ffrac land fraction and land mask dataset
-mksrf_fglacier Glacier dataset
-mksrf_fglacierregion Glacier region ID dataset
-mksrf_flai Leaf Area Index dataset
-mksrf_flanwat Land water dataset
-mksrf_forganic Organic soil carbon dataset
-mksrf_fmax Max fractional saturated area dataset
-mksrf_fsoicol Soil color dataset
-mksrf_fsoitex Soil texture dataset
-mksrf_ftopo Topography dataset (this is used to limit
-the extent of urban regions and is used for glacier multiple elevation classes)
-
-mksrf_furban Urban dataset
-mksrf_fvegtyp PFT vegetation type dataset
-mksrf_fvocef Volatile Organic Compound Emission Factor
-dataset
-mksrf_fgdp GDP dataset
-mksrf_fpeat Peatland dataset
-mksrf_fabm Agricultural fire peak month dataset
-mksrf_ftopostats Topography statistics dataset
-mksrf_fvic VIC parameters dataset
-mksrf_fch4 Inversion-derived CH4 parameters dataset
-
-You then specify the ASCII text file that lists the land-use files:
-
-mksrf_fdynuse "dynamic land use" for transient
-land-use/land-cover changes. This is an ASCII text file that lists the filepaths
-to files for each year and then the year each represents (note: you MUST change the
-filepaths inside the file when running on a machine NOT at &ncar;).
-We always use this file, even for creating datasets of a fixed year. Also note
-that when using the "pft_" settings this file will be an XML-like file with settings
-for PFTs rather than filepaths (see below).
-
-
-
-And optionally you can specify settings for:
-
-all_urban If entire area is urban (typically used for
-single-point urban datasets that you want to be exclusively urban)
-no_inlandwet If TRUE, set wetland to 0% over land
-(renormalizing other landcover types as needed); wetland will only be used for ocean
-points. (Only applies to CLM4.5 version of mksurfdata_map, for which the default is
-TRUE.)
-mksrf_firrig Irrigation dataset, if you want
-to activate the irrigation model over generic cropland
-(experimental mode, normally NOT used)
-mksrf_gridnm Name of output grid resolution (if not
-set the files will be named according to the number of longitudes by latitudes)
-mksrf_gridtype Type of grid (default is 'global')
-nglcec number of glacier multiple elevation classes.
-Can be 0, 1, 3, 5, or 10. When using the resulting dataset with &clm; you can then run
-with glc_nec of either 0 or this value.
- (experimental; normally use the default of 0. When running with the land-ice
-model, in practice only 10 has been used)
-numpft number of Plant Functional Types (PFTs)
-in the input vegetation mksrf_fvegtyp dataset. You change
-this to 20 if you want to create a dataset with prognostic crop activated. The
-vegetation dataset also needs to have prognostic crop types on it as well.
- (experimental; normally not changed from the default of 16)
-outnc_large_files If output should be in &netcdf; large file
-format
-outnc_double If output should be in double
-precision (normally we turn this on)
-pft_frc array of fractions to override PFT
-data with for all gridpoints (experimental mode, normally NOT used).
-pft_idx array of PFT indices to override PFT
-data with for all gridpoints (experimental mode, normally NOT used).
-soil_clay percent clay soil to override
-all gridpoints with (experimental mode, normally NOT used).
-soil_color Soil color to override
-all gridpoints with (experimental mode, normally NOT used).
-soil_fmax Soil maximum fraction to override
-all gridpoints with (experimental mode, normally NOT used).
-soil_sand percent sandy soil to
-override all gridpoints with (experimental mode, normally NOT used).
-
-
-
-After creating your namelist, when running on a non-&ncar; machine you will
-need to get the files from the inputdata repository.
-To retrieve the files needed for mksurfdata, you can run the
-check_input_data script (which also allows you to export data to
-your local disk) on your namelist as follows.
-
-Getting the raw datasets for <command>mksurfdata</command> to your local
-machine using the <command>check_input_data</command> script
-
-> cd models/lnd/clm/tools/mksurfdata
-# First remove any quotes and copy into a filename that can be read by the
-# check_input_data script
-> sed "s/'//g" namelist > clm.input_data_list
-# Run the script with -export and give the location of your inputdata with $CSMDATA
-> ../../../../../scripts/ccsm_utils/Tools/check_input_data -datalistdir . \
--inputdata $CSMDATA -check -export
-# You must then do the same with the flanduse_timeseries file referred to in the namelist
-# in this case we add a file = to the beginning of each line
-> awk '{print "file = "$1}' landuse_timeseries_hist_simyr2000-2000.txt > clm.input_data_list
-# Run the script with -export and give the location of your inputdata with $CSMDATA
-> ../../../../../scripts/ccsm_utils/Tools/check_input_data -datalistdir . \
--inputdata $CSMDATA -check -export
-
-
-
-
-Experimental options to <command>mksurfdata</command>
-
-The options pft_frc, pft_idx, soil_clay, soil_color, soil_fmax, and soil_sand are also
-new and considered experimental. They provide a way to override the PFT and soil
-values for all grid points to the given values that you set. This is useful for
-running with single-point tower sites where the soil type and vegetation are known.
-Note that when you use pft_frc, all other landunits will be zeroed out, and the
-sum of your pft_frc array MUST equal 100.0. Also note that when using the "pft_"
-options the mksrf_fdynuse file, instead of containing filepaths,
-will be an XML-like file with PFT settings. Unlike the file of filepaths, you will
-have to create this file by hand; mksurfdata.pl will NOT be able
-to create it for you (other than the first year, which will be set to the values
-entered on the command line). Note that when &ptclm; is run, it CAN create these
-files for you from a simpler format (see the chapter on &ptclm;).
-Instead of a filepath you have a list of XML elements that give information on the PFTs
-and harvesting, for example:
-
-<pft_f>100</pft_f><pft_i>1</pft_i><harv>0,0,0,0,0</harv><graz>0</graz>
-
-So the <pft_f> tags give the PFT fractions and the <pft_i> tags give the
-index for that fraction. Harvest is an array of five elements, and grazing is a single
-value. Like the usual file, each list of XML elements goes with a year, and there is
-a limit on the number of characters that can be used.
-
-
-
-
-
-Standard Practices when using <command>mksurfdata</command>
-
-In this section we give recommendations on how to use mksurfdata
-so that your results are consistent with the files that we have created with it.
-
-
-If you look at the standard surface datasets that we have created and provided for use,
-there are three practices that we have consistently followed in each (you also see these
-in the sample namelists and in the mksurfdata.pl script).
The first is
-that we always output data in double precision (hence outnc_double
-is set to .true.). The next is that we always use the procedure
-for creating transient datasets (using mksrf_fdynuse) even when
-creating datasets for a fixed simulation year. This is to ensure that the fixed year
-datasets will be consistent with the transient datasets. When this is done a
-"landuse_timeseries" dataset will be created -- but will NOT be used in &clm;. If you look
-at the sample namelist mksurfdata.namelist you will note that it
-sets mksrf_fdynuse to the file
-landuse_timeseries_hist_simyr2000.txt, where the single file entered is
-the same PFT file used in the rest of the namelist (as mksrf_fvegtyp).
-The last practice is to always set mksrf_ftopo,
-even if glacier elevation classes are NOT active. This limits
-urban areas based on topographic height, and hence should
-be used all the time. The glacier multiple elevation classes will be used as well if
-you are running a compset with the active glacier model.
-
-
-There are two other important practices for creating urban single point datasets. The
-first is that you often will want to set all_urban to
-.true. so that the dataset will have 100% of the gridcell output
-as urban, rather than some mix of urban, vegetation types, and other landunits. The
-next practice is that most of our specialized urban datasets have custom values for
-the urban parameters, hence we do NOT want to use the global urban dataset to get
-urban parameters -- we use a previous version of the surface dataset for the urban
-parameters. However, in order to do this, we need to append onto the previous surface
-dataset the grid and land mask/land fraction information from the grid and fraction
-datasets. This is done in mksurfdata.pl using the NCO
-program ncks. An example of doing this for the Mexico City, Mexico
-urban surface dataset is as follows:
-
-> ncks -A $CSMDATA/lnd/clm2/griddata/griddata_1x1pt_mexicocityMEX_c090715.nc \
-$CSMDATA/lnd/clm2/surfdata/surfdata_1x1_mexicocityMEX_simyr2000_c100407.nc
-> ncks -A $CSMDATA/lnd/clm2/griddata/fracdata_1x1pt_mexicocityMEX_navy_c090715.nc \
-$CSMDATA/lnd/clm2/surfdata/surfdata_1x1_mexicocityMEX_simyr2000_c100407.nc
-
-Note that if you look at the current single point urban surface datasets you will see
-that the above has already been done.
-
-
-The final issue is how to build mksurfdata. When NOT optimized,
-mksurfdata is very slow, and can take many hours to days to
-run for even medium resolutions such as one or two degrees. So usually you will want
-to run it optimized. Possibly you also want to use shared memory parallelism using
-&omp; with the SMP option. The problem with running optimized is that
-answers will be different when running optimized versus non-optimized for most
-compilers. So if you want answers to be the same as a previous surface dataset, you
-will need to run it on the same platform and optimization level. Likewise, running
-with or without &omp; may also change answers (for most compilers it will NOT, however
-it does for the IBM compiler). However, answers should be the same regardless of the
-number of threads used when &omp; is enabled. Note that the output surface datasets
-will have attributes that describe whether the file was written out optimized or not,
-with threading or not and the number of threads used, to enable the user to more
-easily try to match datasets created previously.
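-For example, you might build and run an optimized, threaded mksurfdata
-as follows (a sketch only; it assumes mksurfdata accepts the same
-OPT and SMP make flags as the other &clm; tools such as interpinic, and that it
-reads its namelist on standard input):
-
-> cd models/lnd/clm/tools/mksurfdata
-> gmake OPT=TRUE SMP=TRUE
-# (assumes the namelist is read from standard input; adjust the thread count
-# to what your machine supports)
-> env OMP_NUM_THREADS=32 ./mksurfdata < mksurfdata.namelist
-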
For more information on the different
-compiler options for the &clm4; tools see the section on building the tools.
-
-
-
-
-
-
-
-Using &ncl; scripts <command>ndepregrid.ncl</command> and
-<command>aerdepregrid.ncl</command> to interpolate aerosol and Nitrogen deposition datasets
-
-Unlike the other tools, these are &ncar; Command Language (&ncl;) scripts,
-and you will need to get a copy of &ncl; in order to use them. You also won't have to
-build an executable in order to use them, hence no Makefile is provided. &ncl; is provided
-for free download as either binaries or source code from
-http://www.ncl.ucar.edu/. The &ncl;
-website also contains documentation on &ncl; and its use.
-
-
-By default at this point neither of these scripts HAS to be used,
-as the model is now constructed to read aerosol and Nitrogen deposition from 2-degree
-datasets and interpolate to the model resolution on the fly. The main reason you might
-want to do this now is for better performance for single-point simulations.
-
-
-Both the ndepregrid.ncl and aerdepregrid.ncl
-scripts have similar interfaces, and you customize the output resolution and
-characteristics based on the settings of environment variables that you set (if you
-don't set any of the variables, the script has defaults that it will use).
-The list of environment variables that can be set is:
-
-RES -- output resolution name
-RCP -- representative concentration pathway for future scenarios
-(example 2.6, 4.5, 6, or 8.5)
-SIM_YR -- simulation year (example 1850 or 2000)
-SIM_YR_RNG -- simulation year range (example 1850-2000 or
-1850-2100)
-GRDFIL -- full pathname of grid file to use
-(in place of getting the default grid file based on the RES value)
-CSMDATA -- &cesm; inputdata directory
-CLM_ROOT -- root directory for &clm; (models/lnd/clm directory)
-
-
-
-
-You MUST provide either RES or both
-GRDFIL AND RES. If you
-just give RES, the default namelist database in
-models/lnd/clm/bld will be used to find the default grid
-file based on the resolution name RES. If you provide
-GRDFIL, the input pathname of the gridfile provided will be used,
-and the output filename will include RES as part of its name
-to designate it as an output file at that resolution.
-
-
-
-Both scripts assume that you will be interpolating from a native resolution of 1.9x2.5
-and using the default files found in the namelist database to interpolate from. If you
-want to interpolate from another resolution or use other files, you would need to edit
-the scripts to do so. Both scripts also use a bilinear interpolation to do the
-regridding. The environment variables RCP, SIM_YR,
-and SIM_YR_RNG will be used to query the namelist database to
-determine which native dataset to interpolate from. If you don't provide valid
-values for these variables, it won't be able to find a dataset to interpolate from.
-You can use the build-namelist script to query what the valid values for these can
-be. Likewise, when you use RES to determine the grid file to interpolate
-to, it needs to be a valid value from the namelist database.
-
-
-The scripts can interpolate from (and create as output) either constant or
-transient datasets.
-Constant datasets specify the SIM_YR and set SIM_YR_RNG
-to constant (which is also the default). Transient datasets need
-to specify both SIM_YR and SIM_YR_RNG, where
-SIM_YR is set to the first year in the interval (typically 1850).
-
-
-The default for CSMDATA works for &ncar; computers, but it will need to
-be set to the top level directory location of your &cesm; input data on other computers.
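-For example, with csh you might set it as follows (the directory shown is only a
-placeholder for your own inputdata location):
-
-# set this to wherever your inputdata actually resides
-> setenv CSMDATA /path/to/ccsm/inputdata
-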
-If you set this as a default for your shell when you login (for example with your
-$HOME/.cshrc if you use csh) you won't have to set it each time
-you run the script. CLM_ROOT will default to the proper location
-when you run it in the models/lnd/clm/tools/ncl_scripts
-directory. It is only useful if you want to run the script out of a different
-directory.
-
-
-Using <command>ndepregrid.ncl</command> to interpolate Nitrogen deposition datasets
-
-ndepregrid.ncl interpolates the Nitrogen deposition datasets from one resolution
-to another.
-
-
-
-Interpolating Nitrogen deposition files is no longer needed, because the model can
-read Nitrogen deposition files at one resolution and interpolate to the resolution the
-model is running at on the fly. Interpolating to another
-resolution is only useful for very coarse resolutions, if you want to save some computing
-resources in reading larger datasets. For example, this may be useful in obtaining
-single-point datasets.
-
-
-
-For example, to interpolate to an output resolution of 0.9x1.25, for a constant
-simulation-year of 1850, you would do the following:
-
-> env RES=0.9x1.25 SIM_YR=1850 ncl ndepregrid.ncl
-
-
-
-
-
-Using <command>aerdepregrid.ncl</command> to interpolate Aerosol deposition datasets
-
-aerdepregrid.ncl interpolates the Aerosol deposition datasets from one resolution
-to another.
-It can be used to interpolate either constant datasets (for example:
-aerosoldep_monthly_2000_0.9x1.25_c090828.nc) or transient datasets (for example:
-aerosoldep_monthly_1849-2006_0.9x1.25_c090830.nc).
-
-
-
-Interpolating aerosol deposition files is no longer needed, because the &datm; model can
-read aerosol deposition files at one resolution and interpolate to the resolution the
-model is running at on the fly. Interpolating to another
-resolution is only useful for very coarse resolutions, if you want to save some computing
-resources in reading larger datasets. For example, this may be useful in obtaining
-single-point datasets.
-
-
-
-For example, to interpolate to an output resolution of 4x5, for a transient
-simulation-year range of 1850 to 2100 and an RCP of 8.5, you would do the following:
-
-> env RES=4x5 SIM_YR=1850 SIM_YR_RNG=1850-2100 RCP=8.5 ncl aerdepregrid.ncl
-
-
-
-
-
-
-How to Customize Datasets for particular Observational Sites
-
-There are two ways to customize datasets for a particular observational site. The first
-is to customize the input to the tools that create the dataset, and the second is to
-overwrite the default data after you've created a given dataset. Depending on the tool
-it might be easier to do it one way or the other. Elsewhere in this guide we list the
-files that are most likely to be
-customized and the ways they might be customized. Of those files, the ones you are most
-likely to customize are: fatmlndfrc, fsurdat, faerdep (for &datm;), and
-stream_fldfilename_ndep. Note that mksurfdata, as documented previously,
-has options to overwrite the vegetation and soil types; &ptclm; uses these
-methods to customize datasets (see the chapter on &ptclm;).
-
-
-Another aspect of customizing your input datasets is customizing the input atmospheric
-forcing datasets; more information on this is given elsewhere in this guide.
-Also, the chapter on &ptclm;
-has information on using the AmeriFlux tower site data as atmospheric forcing.
-
-
-
-
-Conclusion of tools description
-
-We've given a description of how to use the different tools with &clm; to create
-customized datasets.
In the next chapter we will talk about how to make these
-files available to build-namelist so that you can easily create simulations
-that include them. In the chapter on single-point and regional datasets we also
-give an alternative way to enter new datasets without having to edit files.
-
-
-
-
-
diff --git a/doc/UsersGuide/trouble_shooting.xml b/doc/UsersGuide/trouble_shooting.xml
deleted file mode 100644
index 418cfabaae..0000000000
--- a/doc/UsersGuide/trouble_shooting.xml
+++ /dev/null
@@ -1,545 +0,0 @@
-
-
-Trouble Shooting Problems
-
-In this chapter we give some guidance on what to do when you encounter some of the
-most common problems. We can't cover all the problems that a user could potentially
-have, but we will try to help you recognize some of the most common situations.
-And we'll give you some suggestions on how to approach the problem to come up with
-a solution.
-
-
-In general you will run into one of three types of problems:
-
-configure-time
-build-time
-run-time
-
-You may also run into problems with create_newcase itself, or
-with the archiving scripts -- for those problems you should consult the
-&cesmrel; Scripts User's Guide.
-
-
-
-Trouble with Configuration
-
-The first type of problem happens when you invoke the configure -case
-command. This indicates there is something wrong with your template files, input
-datasets, or the details of what you are trying to configure the model to do.
-There's also a trouble-shooting chapter in the &cesmrel;
-Scripts User's Guide. Many of the problems with configuration can be resolved
-with the guidelines given there. Here we will restrict ourselves to problems from the
-&clm; or &datm; templates or input files.
-
-Example of configure problem with missing datasets
-
-> ./create_newcase -case T31rcp6 -res T31_g37 -compset IRCP60CN \
--mach bluefire
-> ./configure -case
-
-The following is what is displayed to the screen.
-
-Generating resolved namelist, prestage, and build scripts
-configure done.
-adding use_case 1850-2100_rcp6_transient defaults for var clm_demand with val flanduse_timeseries
-adding use_case 1850-2100_rcp6_transient defaults for var clm_start_type with val startup
-adding use_case 1850-2100_rcp6_transient defaults for var model_year_align_ndep with val
-1850
-adding use_case 1850-2100_rcp6_transient defaults for var rcp with val 6
-adding use_case 1850-2100_rcp6_transient defaults for var sim_year with val 1850
-adding use_case 1850-2100_rcp6_transient defaults for var sim_year_range with val
-1850-2100
-adding use_case 1850-2100_rcp6_transient defaults for var stream_year_first_ndep with val
-1850
-adding use_case 1850-2100_rcp6_transient defaults for var stream_year_last_ndep with val
-2100
-adding use_case 1850-2100_rcp6_transient defaults for var use_case_desc with val Simulate
-transient land-use, aerosol and Nitrogen deposition changes with historical data from
-1850 to 2005 and then with the RCP6 scenario from AIM
-
-build-namelist - No default value found for flanduse_timeseries.
- Are defaults provided for this resolution and land mask?
-ERROR: generate_resolved.csh error for lnd template
-configure error: configure generated error in attempting to created resolved scripts
-
-
-The important thing to note here is the line:
-
-ERROR: generate_resolved.csh error for lnd template
-
-which tells us that the problem is in the land template.
It may also indicate problems
-in one of the other templates (atm, ccsm, cpl, glc, ice, or ocn), in which case you
-should consult the appropriate model user's guide, and examine the given template file
-in Tools/Templates. For more information on working with template
-files see the &cesmrel; Scripts User's Guide.
-
-
-In the example above, it's obvious that the problem is coming from the &clm; &buildnml;;
-in other situations it might not be so obvious where the problem is occurring. In such
-cases it might be useful to add a "set echo" command to the top of the template file so
-that each command in the template will be echoed to the screen and you can see what
-is happening and where the error is occurring.
-
-set echo
-
-
-
-In the example, the error is that the &clm; XML database does NOT have a
-flanduse_timeseries for the given resolution, RCP scenario and ocean mask.
-That means you will need to create the file and then supply the file to your case. See
-the chapters on creating files and on adding files to the
-XML database for more information. Alternatively, you can provide the file to your case
-by creating a user namelist.
-
-
-
-The two most common problems from your &clm; template will be errors from the &clm;
-&configure; or &buildnml;. For more information on these scripts see
-the sections on &configure; and &CLMBLDNML;.
-
-
-
-
-
-Trouble with Building
-
-Here's an example of running the build for a case and having it fail in the land model
-build. As you can see, it lists which model component is being built and the build log
-for that component.
-
- CCSM BUILDEXE SCRIPT STARTING
-
- Build Libraries: mct pio csm_share
-Sat Jun 19 21:21:19 MDT 2010 /ptmp/erik/test_build/mct/mct.bldlog.100619-212107
-Sat Jun 19 21:22:18 MDT 2010 /ptmp/erik/test_build/pio/pio.bldlog.100619-212107
-Sat Jun 19 21:23:18 MDT 2010
-/ptmp/erik/test_build/csm_share/csm_share.bldlog.100619-212107
-Sat Jun 19 21:24:00 MDT 2010 /ptmp/erik/test_build/run/cpl.bldlog.100619-212107
-Sat Jun 19 21:24:00 MDT 2010 /ptmp/erik/test_build/run/atm.bldlog.100619-212107
-Sat Jun 19 21:24:06 MDT 2010 /ptmp/erik/test_build/run/lnd.bldlog.100619-212107
-ERROR: clm.buildexe.csh failed, see /ptmp/erik/test_build/run/lnd.bldlog.100619-212107
-ERROR: cat /ptmp/erik/test_build/run/lnd.bldlog.100619-212107
-
-You can then examine the build log that failed and see what went wrong. Most compilers
-will give the full filepath and line number for the file that failed to compile.
-
-
-
-
-Trouble with Running
-
-Tracking down problems while the model is running is much more difficult than
-tracking down configure or build problems. In this section we will give some suggestions
-on how to find run-time problems. Below we show the log file results of a job
-that aborted while running.
-
- CCSM PRESTAGE SCRIPT HAS FINISHED SUCCESSFULLY
-Sun Jun 20 18:24:06 MDT 2010 -- CSM EXECUTION BEGINS HERE
-Sun Jun 20 18:24:35 MDT 2010 -- CSM EXECUTION HAS FINISHED
-Model did not complete - see /ptmp/erik/test_run/run/cpl.log.100620-182358
-
-In the next section we will talk about using the different log files to track
-down problems, and find out where the problem is coming from. In the section
-after that we give some general advice on debugging problems and some suggestions
-on ideas that may be helpful to track the problem down. Some of the examples
-below are from the &KnownBugs; file.
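-As a quick first pass, it can also be useful to search all of the log files for
-error strings at once, for example (a sketch; substitute the run directory and
-timestamp suffix of your own failed run):
-
-# the run directory and log suffix below are from the example above
-> cd /ptmp/erik/test_run/run
-> grep -i error *.log.100620-182358
-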
-
-
-Tracking Problems by Querying Log Files
-
-The first thing to do when tracking down problems is to query the different log
-files to see if you can discover where the problem occurs, and any error messages about
-it. It's important to figure out if the problem comes in at initialization or in the
-run phase of the model, and in which model component the problem happens. There
-are different log files for the different major components, and they all end
-with the date and time in YYMMDD-HHMMSS format (2-digit year, month, day, hour,
-minute and second). When the model runs to completion the log files will be copied
-to the logs directory in the script directory, but when the
-model fails they will remain in the run directory. Here's an example list of
-log files from an "I" case where the model dies in the land model initialization.
-For "I" cases the sea-ice and ocean components are just stubs and don't create
-log files (and unless running with the active land-ice model "glc" log files won't
-be created either).
-
-atm.log.100620-182358
-ccsm.log.100620-182358
-cpl.log.100620-182358
-lnd.log.100620-182358
-
-
-
-
-The coupler log file
-
-The first log file to check is the coupler log file so that you can see where
-the model dies and which model component it fails in. When the model dies at
-initialization the last model component listed is the component that failed.
-
-
-Example of a case that fails in the &clm; land model initialization.
-
-(seq_timemgr_clockPrint) Prev Time = 00001201 00000
-(seq_timemgr_clockPrint) Next Time = 99991201 00000
-(seq_timemgr_clockPrint) Intervl yms = 9999 0 0
-
-(seq_mct_drv) : Initialize each component: atm, lnd, ocn, and ice
-(seq_mct_drv) : Initialize atm component
-(seq_mct_drv) : Initialize lnd component
-
-
-
-
-
-The ccsm log file
-
-The ccsm log files are to some extent the "garbage collection" of log output. The
-&clm; sends its output from its master processor, but sends other output and possibly
-errors to the ccsm log file. Because of this, error messages are often somewhere in the
-ccsm log file. However, since there is so much other output it may be difficult to find.
-For example, here is some output from an older version of &cesm; (&cesm102;) where the
-RTM river routing file (before it was converted to &netcdf;) was not provided and
-the error on the open statement for the file was embedded near the end of the ccsm log
-file.
-
-NODE# NAME
-( 0) be1105en.ucar.edu
-"/gpfs/proj2/fis/cgd/home/erik/clm_trunk/models/lnd/clm/src/riverroute/RtmMod.F90", line
-239: 1525-155 The file name provided in the OPEN statement for unit 1 has zero length or
-contains all blanks. The program will recover by ignoring the OPEN statement.
-"/gpfs/proj2/fis/cgd/home/erik/clm_trunk/models/lnd/clm/src/riverroute/RtmMod.F90", line
-241: 1525-001 The READ statement on the file fort.1 cannot be completed because the end
-of the file was reached. The program will stop.
-
-Running: ./ccsm.exe
-Please wait...
-
-Memory usage for ./ccsm.exe (task # 0) is: 51696 KB. Exit status: 1. Signal: 0
-
-Although the example is from an earlier version of the model, it still serves to
-illustrate finding problems from the ccsm log file.
-
-
-When working with the ccsm log file for a run-time problem, you will need to be able
-to separate its output into three categories: pre-crash, crash, and post-crash. The
-pre-crash section is everything that is normal output for good operation of the model.
-The crash section is the section where the model dies and reports on the actual problem.
-The post-crash section is the cleanup and finalization after the model dies. The most
-important part of this of course is the crash section. The tricky part is distinguishing
-it from the other sections. Also, because the ccsm log file most likely has duplicated
-output from multiple processors, it is even more difficult to distinguish the different
-sections, and to some extent the sections may be intertwined, as different processors
-reach the different sections at different times. Because of this, reducing the number of
-processors for your simulation may help you sort out the output in the file (see
-the advice below on running with a smaller set of processors). Also, much of the output
-in the ccsm log file is
-system-level information having to do with &mpi; multiprocessing. Usually you can ignore
-this information, but it makes it more difficult to trudge through.
-
-
-Sometimes the ccsm log file is the ONLY file available, because the model terminates
-early in initialization. In this case understanding the output in the ccsm log file
-becomes even more important. This also indicates the model did NOT advance far enough
-to reach the initialization of the individual model components. This may mean that the
-initialization of the multiprocessing for &mpi; and/or &omp; failed, or that the
-reading of the driver namelist file "drv_in" failed.
-
-
-Here we show those three sections for a ccsm log file where a two-task job failed on
-reading the namelist file. For a typical job with many tasks, similar sections
-will be repeated not just twice but for each task, which makes it harder to read.
-
-
-Pre-crash section of the ccsm log file
-
-ATTENTION: 0031-386 MP_INSTANCES setting ignored when LoadLeveler is not being used.
-ATTENTION: 0031-386 MP_INSTANCES setting ignored when LoadLeveler is not being used.
-ATTENTION: 0031-378 MP_EUIDEVICE setting ignored when LoadLeveler is not being used.
-ATTENTION: 0031-386 MP_INSTANCES setting ignored when LoadLeveler is not being used.
- 0:INFO: 0031-724 Executing program: </usr/local/lsf/7.0/aix5-64/bin/lsnrt_run>
- 1:INFO: 0031-724 Executing program: </usr/local/lsf/7.0/aix5-64/bin/lsnrt_run>
- 0:/contrib/bin/ccsm_launch: process 401894 bound to logical CPU 0 on host be0310en.ucar.edu ...
- 1:/contrib/bin/ccsm_launch: process 439264 bound to logical CPU 1 on host be0310en.ucar.edu ...
- 0:INFO: 0031-619 64bit(us, Packet striping on) ppe_rmas MPCI_MSG: MPI/MPCI library was compiled on Wed Aug 5 13:36:06 2009
- 0:
- 1:LAPI version #14.26 2008/11/23 11:02:30 1.296 src/rsct/lapi/lapi.c, lapi, rsct_rpt53, rpt53s004a 09/04/29 64bit(us) library compiled on Wed Apr 29 15:30:42 2009
- 1:.
- 1:LAPI is using lightweight lock.
- 0:LAPI version #14.26 2008/11/23 11:02:30 1.296 src/rsct/lapi/lapi.c, lapi, rsct_rpt53, rpt53s004a 09/04/29 64bit(us) library compiled on Wed Apr 29 15:30:42 2009
- 0:.
- 0:LAPI is using lightweight lock.
- 0:Use health ping for failover/recovery
- 1:Use health ping for failover/recovery
- 0:Initial communication over instance 2.
- 1:Initial communication over instance 0.
- 1:IB RDMA initialization completed successfully - 1:The MPI shared memory protocol is used for the job - 0:IB RDMA initialization completed successfully - 0:LAPI job ID for this job is: 1684890719 - 0:The MPI shared memory protocol is used for the job - 0:(seq_comm_setcomm) initialize ID ( 7 GLOBAL ) pelist = 0 1 1 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_setcomm) initialize ID ( 2 ATM ) pelist = 0 1 1 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_setcomm) initialize ID ( 1 LND ) pelist = 0 1 1 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_setcomm) initialize ID ( 4 ICE ) pelist = 0 1 1 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_setcomm) initialize ID ( 5 GLC ) pelist = 0 1 1 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_setcomm) initialize ID ( 3 OCN ) pelist = 0 1 1 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_setcomm) initialize ID ( 6 CPL ) pelist = 0 1 1 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_joincomm) initialize ID ( 8 CPLATM ) join IDs = 6 2 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_joincomm) initialize ID ( 9 CPLLND ) join IDs = 6 1 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_joincomm) initialize ID ( 10 CPLICE ) join IDs = 6 4 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_joincomm) initialize ID ( 11 CPLOCN ) join IDs = 6 3 ( npes = 2) ( nthreads = 1) - 0:(seq_comm_joincomm) initialize ID ( 12 CPLGLC ) join IDs = 6 5 ( npes = 2) ( nthreads = 1) - 0: - 0: (seq_comm_printcomms) ID layout : global pes vs local pe for each ID - 0: gpe LND ATM OCN ICE GLC CPL GLOBAL CPLATM CPLLND CPLICE CPLOCN CPLGLC nthrds - 0: --- ------ ------ ------ ------ ------ ------ ------ ------ ------ ------ ------ ------ ------ - 0: 0 : 0 0 0 0 0 0 0 0 0 0 0 0 1 - 1: 1 : 1 1 1 1 1 1 1 1 1 1 1 1 1 - 1: - 0: (t_initf) Read in prof_inparm namelist from: drv_in - 1: (seq_io_init) cpl_io_stride, iotasks or root out of bounds - resetting to defaults 4 0 1 - 0: piolib_mod.f90 1353 1 2 1 2 - 1: piolib_mod.f90 1353 1 2 1 2 - 0: pio_support::pio_die:: myrank= 0 : ERROR: piolib_mod.f90: 1354 : not enough procs for the stride - 1: pio_support::pio_die:: myrank= 1 : ERROR: piolib_mod.f90: 1354 : not enough procs for the stride - - - -Crash section of the ccsm log file - - 0: - 0: Traceback: - 1: - 1: Traceback: - 0: Offset 0x00000c4c in procedure __pio_support_NMOD_piodie, near line 88 in file pio_support.F90.in - 1: Offset 0x00000c4c in procedure __pio_support_NMOD_piodie, near line 88 in file pio_support.F90.in - 0: Offset 0x00000fd0 in procedure __piolib_mod_NMOD_init, near line 1354 in file piolib_mod.F90 - 1: Offset 0x00000fd0 in procedure __piolib_mod_NMOD_init, near line 1354 in file piolib_mod.F90 - 1: Offset 0x00000398 in procedure __seq_io_mod_NMOD_seq_io_init, near line 247 in file /gpfs/proj2/fis/cgd/home/erik/clm_trunk/models/drv/shr/seq_io_mod.F90 - 0: Offset 0x00000398 in procedure __seq_io_mod_NMOD_seq_io_init, near line 247 in file /gpfs/proj2/fis/cgd/home/erik/clm_trunk/models/drv/shr/seq_io_mod.F90 - 0: Offset 0x0001aa88 in procedure ccsm_driver, near line 465 in file /gpfs/proj2/fis/cgd/home/erik/clm_trunk/models/drv/driver/ccsm_driver.F90 - 0: --- End of call chain --- - 1: Offset 0x0001aa88 in procedure ccsm_driver, near line 465 in file /gpfs/proj2/fis/cgd/home/erik/clm_trunk/models/drv/driver/ccsm_driver.F90 - 1: --- End of call chain --- - - - -Post-crash section of the ccsm log file - - 1:Communication statistics of task 1 is associated with task key: 1684890719_1 - 0:Communication statistics of task 0 is associated with task key: 1684890719_0 - 0: - 0:Running: ./ccsm.exe - 0:Please wait... 
- 0:
- 0:Memory usage for ./ccsm.exe (task # 0) is: 198892 KB. Exit status: 134. Signal: 0
- 1:
- 1:Running: ./ccsm.exe
- 1:Please wait...
- 1:
- 1:Memory usage for ./ccsm.exe (task # 0) is: 198572 KB. Exit status: 134. Signal: 0
-INFO: 0031-656 I/O file STDOUT closed by task 0
-INFO: 0031-656 I/O file STDERR closed by task 0
-ERROR: 0031-250 task 0: IOT/Abort trap
-INFO: 0031-656 I/O file STDOUT closed by task 1
-INFO: 0031-656 I/O file STDERR closed by task 1
-ERROR: 0031-250 task 1: IOT/Abort trap
-INFO: 0031-639 Exit status from pm_respond = 0
-ATTENTION: 0031-386 MP_INSTANCES setting ignored when LoadLeveler is not being used.
-Job /usr/local/lsf/7.0/aix5-64/bin/poejob /contrib/bin/ccsm_launch /contrib/bin/job_memusage.exe ./ccsm.exe
-
-TID HOST_NAME COMMAND_LINE STATUS TERMINATION_TIME
-===== ========== ================ ======================= ===================
-00000 be0310en /contrib/bin/ccs Exit (134) 08/31/2010 12:32:57
-00001 be0310en /contrib/bin/ccs Exit (134) 08/31/2010 12:32:57
-
-
-
-
-
-The &clm; log file
-
-Of course when you are working with and making changes to &clm;, most of your focus
-will be on the &clm; log file and the errors it shows. As already pointed out,
-if you don't see errors in the lnd.log.* file you should look
-in the ccsm.log.* to see if any errors showed up there.
-
-
-Here's an example of the lnd.log.* file when running
-&PTSMODE; with initial conditions (this is bug 1025 in the &KnownBugs; file).
-
- Successfully initialized variables for accumulation
-
- reading restart file I2000CN_f09_g16_c100503.clm2.r.0001-01-01-00000.nc
- Reading restart dataset
- ERROR - setlatlon.F:Cant get variable dim for lat or lsmlat
- ENDRUN: called without a message string
-
-
-
-
-
-The &datm; log file
-
-When working with "I cases" the second most common problems after &clm; problems are
-problems with the data atmosphere model. So examining the atm.log.*
-file is important.
-
-
-Here's an example of a problem that occurs when the wrong prescribed aerosol file
-is given to a pt1_pt1 simulation.
-
-(datm_comp_init) atm mode = CLMNCEP
-(shr_strdata_init) calling shr_dmodel_mapSet for fill
-(shr_strdata_init) calling shr_dmodel_mapSet for remap
- ('shr_map_getWts') ERROR: yd outside bounds 19.5000000000000000
-(shr_sys_abort) ERROR: ('shr_map_getWts') ERROR yd outside 90 degree bounds
-(shr_sys_abort) WARNING: calling shr_mpi_abort() and stopping
-
-
-
-
-
-The batch log files
-
-The names of the batch log files will depend on the batch system of the machine
-that is being used. They will normally be in the script directory. Usually they
-don't contain important information, but they are a last-resort place to look for
-error messages. On the &ncar; IBM system "bluefire" the batch files have
-names that start with "poe" and then either "stderr" or "stdout", with the
-job number at the end.
-
-
-
-
-
-
-General Advice on Debugging Run-time Problems
-
-Here are some suggestions on how to track down a problem while running. In general,
-if the problem still occurs for a simpler case, it will be easier to track down.
-
-Run in DEBUG mode
-Run with a smaller set of processors
-Run in serial mode with a single processor
-Run at a lower resolution
-Run a simpler case
-Run with a debugger
-
-
-
-
-Run in DEBUG mode
-
-The first thing to try is to run in DEBUG mode so that floating-point trapping will be
-triggered, as well as array bounds checking and other things the compiler can turn
-on to help you find problems.
To do this edit the &envbuild; file and set DEBUG
-to TRUE as follows:
-
-> ./xmlchange -file env_build.xml -id DEBUG -val TRUE
-
-
-
-
-
-Run with a smaller set of processors
-
-Another way to simplify the system is to run with a smaller set of processors. You
-will need to clean the configuration and edit env_mach_pes.xml.
-For example, to run with four processors:
-
-> ./configure -cleanall
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_ATM -val 4
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_LND -val 4
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_ICE -val 4
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_OCN -val 4
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_CPL -val 4
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_GLC -val 4
-> ./configure -case
-
-Another recommended simplification is to run without threading, so set the
-NTHRDS for each component to "1" if it isn't already. Sometimes
-multiprocessing problems require a certain number of processors before they occur,
-so you may not be able to debug the problem without enough processors. But it's always
-good to reduce it to as low a number as possible to make it simpler. For threading
-problems you may have to have threading enabled to find the problem, but you can run
-with 1, 2, or 3 threads to see what happens.
-
-
-
-
-Run in serial mode with a single processor
-
-Simplifying to one processor removes all multi-processing problems and makes
-the case as simple as possible. If you can enable USE_MPI_SERIAL
-you will also be able to run interactively rather than having to submit to a job
-queue, which sometimes makes it easier to run and debug. If you can use
-USE_MPI_SERIAL you can also use threading, but still run interactively
-in order to use more processors to make it faster if needed.
-
-> ./configure -cleanall
-# Set tasks and threads for each component to 1
-# You could also set threads to something > 1 for speed, but still
-# run interactively if threading isn't an issue.
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_ATM -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTHRDS_ATM -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_LND -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTHRDS_LND -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_ICE -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTHRDS_ICE -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_OCN -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTHRDS_OCN -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_CPL -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTHRDS_CPL -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTASKS_GLC -val 1
-> ./xmlchange -file env_mach_pes.xml -id NTHRDS_GLC -val 1
-# If mpi-serial capability is available on the machine you are using
-# set USE_MPI_SERIAL to true so that you can run interactively
-> ./xmlchange -file env_conf.xml -id USE_MPI_SERIAL -val TRUE
-> ./configure -case
-# Then build your case
-# And finally run, by running the *.run script interactively
-# (If you were able to set USE_MPI_SERIAL to true)
-
-
-
-
-
-Run at a lower resolution
-
-If you can create a new case running at a lower resolution and replicate the problem,
-it may be easier to solve. This of course requires creating a whole new case, and trying
-out different lower resolutions.
-
-
-
-
-Run a simpler case
-
-Along the same lines, you might try running a simpler case, trying another compset
-with a simpler setup, and see if you can replicate the problem and then debug from that
-simpler case.
Again, of course you will need to create new cases to do this.
-
-
-
-
-Run with a debugger
-
-Another suggestion is to run the model with a debugger such as dbx,
-gdb, or totalview. Often to run with a debugger
-you will need to reduce the number of processors as outlined above. Some debuggers such
-as dbx will only work with one processor, while more advanced
-debuggers such as totalview can work with both &mpi; tasks and &omp;
-threads. Even simple debuggers, though, can be used to query core files, to see where
-the code was when it died (for example, using the where command in
-dbx on a core file can be very helpful). For help in running
-with a debugger you will need to contact your system administrators for the machine
-you are running on.
-
-
-
-
-
-
-
-
diff --git a/doc/source/_static/css/custom.css b/doc/source/_static/css/custom.css
new file mode 100644
index 0000000000..10abb45722
--- /dev/null
+++ b/doc/source/_static/css/custom.css
@@ -0,0 +1,17 @@
+/* Make equation numbers float to the right */
+.eqno {
+    margin-left: 5px;
+    float: right;
+}
+/* Hide the link... */
+.math .headerlink {
+    display: none;
+    visibility: hidden;
+}
+/* ...unless the equation is hovered */
+.math:hover .headerlink {
+    display: inline-block;
+    visibility: visible;
+    /* Place link in margin and keep equation number aligned with boundary */
+    margin-right: -0.7em;
+}
diff --git a/doc/source/_static/pop_ver.js b/doc/source/_static/pop_ver.js
new file mode 100644
index 0000000000..b8c58658a8
--- /dev/null
+++ b/doc/source/_static/pop_ver.js
@@ -0,0 +1,37 @@
+$(document).ready(function() {
+    /* For a URL that looks like
+       https://blah.github.io/versions/VERSIONFOO/html/bar/index.html, set cur_version_dir to
+       'VERSIONFOO' (i.e., the portion of the path following 'versions').
+    */
+    var proj_end = document.baseURI.indexOf("versions") + 9;
+    var end = document.baseURI.indexOf("/", proj_end);
+    var cur_version_dir = document.baseURI.substring(proj_end, end);
+    var mylist = $("#version-list");
+    mylist.empty();
+    $.getJSON(version_json_loc, function(data) {
+        if (data.hasOwnProperty(cur_version_dir)) {
+            /* First add the current version so that it appears first in the drop-down
+               menu and starts as the selected element of the menu. If you click on the
+               current version, you should stay at the current page.
+
+               The conditional around this block should generally be true, but we check it
+               just in case the current version is missing from the versions.json file for
+               some reason.
+            */
+            cur_version_name = data[cur_version_dir];
+            mylist.append($("