diff --git a/list_of_python_utilities.md b/list_of_python_utilities.md deleted file mode 100644 index f28d8b71..00000000 --- a/list_of_python_utilities.md +++ /dev/null @@ -1,46 +0,0 @@ -A list of the python utilities currently in PROCESS, highlighting the ones which do not have user guides in bold. - -- a_to_b.conf - -a_to_b.py -archive.sh -build_index.py -cad_output.py -compare_radials.py -> Katy Ellis -convert_in_dat.py -cost_bar.py -cost_pie.py -create_csv4database.py -create_dicts.py -diagnose_process.py -diagnose_uncertainties.py -display_uncertainties.py -evaluate_uncertainties.py -fit_profile.py -in_dat_comparison.py -kallenbach_plotting.py -> ? -kallenbach_plotting2.py -> ? -line_length_standard.py -> James Morris -make_plot_dat.py -mcnp_output.py -mfile_comparison.py -ndscan.py -ndscan_and_package.py -ndscan_package_only.py -ndscan_visualisation.py -output_data.py -output_detailed.py -> James Morris -output_summary.py -plot_comparison.py -plot_mfile_sweep.py -plot_opti_process.py -plot_proc.py -plot_profiles.py -plot_sankey.py -> Adam Brown -popcon.py -ref_check.py -root_path.py -> ? 
-run_process.py -test_process.py -xls_create.py -> Michael Kovari -write_new_in_dat.py diff --git a/utilities/compare_radials.py b/utilities/compare_radials.py deleted file mode 100755 index d47b8b5e..00000000 --- a/utilities/compare_radials.py +++ /dev/null @@ -1,353 +0,0 @@ -#!/usr/bin/env python -""" - Radial plot comparison tool using PLASMOD-type input - - Katy Ellis - 01/03/2018 - CCFE - -""" - -####################### -# imported libraries - -# from matplotlib import rc,use -# rc('font',size=15) -# rc('lines',lw=1.5)#is overwritten by setting coulours/linestylies -# rc(('xtick','ytick'),labelsize=15) -# rc('figure',figsize=(8,6)) -# rc('figure.subplot',bottom=0.12) -# rc('figure.subplot',left=0.19) -# rc('figure.subplot',right=0.81) - -import process.io.mfile as mf -import matplotlib.pyplot as plt -import matplotlib.backends.backend_pdf as bpdf -import argparse -import numpy as np -from numpy import loadtxt - - -def plotProfile(n, i, p, count): - """Function to plot radial profiles from 1D modelling, e.g. 
from PLASMOD or ASTRA - Arguments: - n --> number of input files in column format - i --> column number to plot - p --> page number to add plot - count --> position of plot on page - """ - - # for i in range(0,len(ASSIGN_PAGE)): - # if(ASSIGN_PAGE[i]==p): - if count == 0: - figure = 221 - if count == 1: - figure = 222 - if count == 2: - figure = 223 - if count == 3: - figure = 224 - page[p].add_subplot(figure) - - for f in range(0, n): - if f == 0: - plt.plot( - DATA_PROFILES[f][0], - DATA_PROFILES[f][i], - linewidth=2, - color="C1", - zorder=2, - marker="", - label=LEGEND_NAME[f], - ) - else: - plt.scatter( - DATA_PROFILES[f][0], - DATA_PROFILES[f][i], - s=60, - color="#808284", - zorder=1, - label=LEGEND_NAME[f], - ) - # plt.scatter(DATA_PROFILES[f][0], DATA_PROFILES[f][i], s = 20, color = 'C0', label=LEGEND_NAME[f]) - - if ARGS.mfile is not None: - # Add profiles generated by PROCESS 0D (ipedestal = 1) - print(LIST_PROFILES[i]) - if (LIST_PROFILES[i]) == "DEN": - print("Adding density 0D") - plot_nprofile(plt) - if (LIST_PROFILES[i]) == "TE": - print("Adding electron temp 0D") - plot_tprofile(plt) - if (LIST_PROFILES[i]) == "Q": - print("Adding safety factor 0D") - plot_qprofile(plt) - - plt.xlabel(AXIS_TITLES[0]) - plt.ylabel(AXIS_TITLES[i]) - plt.title(PLOT_TITLES[i]) - plt.legend() - - -def plot_nprofile(prof): # cut-down version from plot_proc.py - """Function to plot density profile - Arguments: - prof --> axis object to add plot to - """ - # if ipedestal == 1: - rhocore1 = np.linspace(0, 0.95 * rhopedn) - rhocore2 = np.linspace(0.95 * rhopedn, rhopedn) - rhocore = np.append(rhocore1, rhocore2) - ncore = neped + (ne0 - neped) * (1 - rhocore**2 / rhopedn**2) ** alphan - - rhosep = np.linspace(rhopedn, 1) - nsep = nesep + (neped - nesep) * (1 - rhosep) / (1 - min(0.9999, rhopedn)) - - rho = np.append(rhocore, rhosep) - ne = np.append(ncore, nsep) - # else: - # rho1 = np.linspace(0,0.95) - # rho2 = np.linspace(0.95,1) - # rho = np.append(rho1,rho2) - # ne = 
ne0 * (1-rho**2)**alphan - ne = ne / 1e19 - prof.plot(rho, ne, linewidth=4, linestyle="--", color="#0082CA", label="PROCESS 0D") - prof.legend() - - -def plot_tprofile(prof): - """Function to plot temperature profile - Arguments: - prof --> axis object to add plot to - """ - # if ipedestal == 1: - rhocore1 = np.linspace(0, 0.9 * rhopedt) - rhocore2 = np.linspace(0.9 * rhopedt, rhopedt) - rhocore = np.append(rhocore1, rhocore2) - tcore = teped + (te0 - teped) * (1 - (rhocore / rhopedt) ** tbeta) ** alphat - - rhosep = np.linspace(rhopedt, 1) - tsep = tesep + (teped - tesep) * (1 - rhosep) / (1 - min(0.9999, rhopedt)) - - rho = np.append(rhocore, rhosep) - te = np.append(tcore, tsep) - # else: - # rho = np.linspace(0,1) - # te = te0 * (1-rho**2)**alphat - prof.plot(rho, te, linewidth=4, linestyle="--", color="#0082CA", label="PROCESS 0D") - prof.legend() - - -def plot_qprofile(prof): - """Function to plot q profile, from Sauter model. - Arguments: - prof --> axis object to add plot to - """ - rho = np.linspace(0, 1) - q_r_sauter = q0 + (q95 - q0) * (rho * rho) - - prof.plot(rho, q_r_sauter, linewidth=4, color="#0082CA", label="PROCESS 0D") - prof.legend() - - -def get_mfileData(): - """Function to assign data from the MFILE for use in 0D profiles - Arguments: None - """ - m_file = mf.MFile(ARGS.mfile) - scan = -1 # takes last scan value - global rhopedn - rhopedn = m_file.data["rhopedn"].get_scan(scan) - global neped - neped = m_file.data["neped"].get_scan(scan) - global alphan - alphan = m_file.data["alphan"].get_scan(scan) - global nesep - nesep = m_file.data["nesep"].get_scan(scan) - global ne0 - ne0 = m_file.data["ne0"].get_scan(scan) - global rhopedt - rhopedt = m_file.data["rhopedt"].get_scan(scan) - global teped - teped = m_file.data["teped"].get_scan(scan) - global te0 - te0 = m_file.data["te0"].get_scan(scan) - global tbeta - tbeta = m_file.data["tbeta"].get_scan(scan) - global alphat - alphat = m_file.data["alphat"].get_scan(scan) - global tesep - tesep = 
m_file.data["tesep"].get_scan(scan) - global q0 - q0 = m_file.data["q0"].get_scan(scan) - global q95 - q95 = m_file.data["q95"].get_scan(scan) - - -if __name__ == "__main__": - - ############################################################ - # Usage - - PARSER = argparse.ArgumentParser( - description="Program to fit a\ - general temperature or density profile from an ascii table." - ) - - PARSER.add_argument( - "-f", - "--filename0", - default="profile.txt", - help="ascii _RAD.DAT or .txt file containing data in columns,\ - default = IN.DAT_RADP.DAT", - ) - - PARSER.add_argument( - "-f1", "--filename1", help="optional ascii file containing data in columns" - ) - - PARSER.add_argument( - "-m", "--mfile", help="optional ascii mfile output from PROCESS" - ) - - ARGS = PARSER.parse_args() - - # The user may wish to change these inputs: - LEGEND_NAME = ["PROCESS 1D", "ASTRA"] # filenames for the legend - LIST_PROFILES = [ - "RHO", - "DEN", - "TE", - "TI", - "DEUT", - "TRIT", - "JBS", - "JCD", - "JTOT", - "IPOL", - "Q", - "VOL", - "DVOL", - "COND", - ] # list of properties - NUM_PAGES = 4 # number of pages required, e.g. 4 - PLOTS_PAGE = 4 # number of plots per page, e.g. 
4 - - PLOT_TITLES = [" "] - PLOT_TITLES.append("Electron density") - PLOT_TITLES.append("Electron temperature") - PLOT_TITLES.append("Ion temperature") - PLOT_TITLES.append("Deuterium density") - PLOT_TITLES.append("Tritium density") - PLOT_TITLES.append("Bootstrap current density") - PLOT_TITLES.append("Current drive current density") - PLOT_TITLES.append("Total current density") - PLOT_TITLES.append("Poloidal current") - PLOT_TITLES.append("Safety factor") - PLOT_TITLES.append("Plasma volume") - PLOT_TITLES.append("dVolume/dr") - PLOT_TITLES.append("Plasma conductivity") - - AXIS_TITLES = ["r/a"] - AXIS_TITLES.append(r"$n_e / 10^{19} m^{-3}$") - AXIS_TITLES.append(r"$t_e / keV$") - AXIS_TITLES.append(r"$t_i / keV$") - AXIS_TITLES.append("$deut / 10^{19} m^{-3}$") - AXIS_TITLES.append("$trit / 10^{19} m^{-3}$") - AXIS_TITLES.append("$J_bs / MA.m^{-2}$") - AXIS_TITLES.append("$J_cd / MA.m^{-2}$") - AXIS_TITLES.append("$J_tot / MA.m^{-2}$") - AXIS_TITLES.append("$I_pol / MA$") - AXIS_TITLES.append("$q$") - AXIS_TITLES.append("$Vol / m^{3}$") - AXIS_TITLES.append("$dVol/dr / m^{2}$") - AXIS_TITLES.append("$Cond / MA.V^{-1}.m^{-1})$") - - PAGE_TITLES = ["Radial profiles (page 1)"] - PAGE_TITLES.append("Radial profiles (page 2)") - PAGE_TITLES.append("Radial profiles (page 3)") - PAGE_TITLES.append("Radial profiles (page 4)") - ################################# - - list_len = len(LIST_PROFILES) - print("number of profiles to be plotted = " + repr(list_len - 1)) - - # ASSIGN_PAGE = [1, 2, 2, 1, 1, 3, 3, 3, 3, 1, 4, 4, 2] #Which page for each plot (4 plots per page) - - fileArray = [ARGS.filename0] - if ARGS.filename1 is not None: - fileArray = [ARGS.filename0, ARGS.filename1] - n = len(fileArray) - print("number of files is: " + repr(n)) - DATA = [None] * n - DATA_PROFILES = [[0] * list_len for i in range(n)] - - # create vectors to store the data - useProfile = [None] * list_len - for i in range(0, list_len): - useProfile[i] = False - - for f in range(0, n): # loop 
over files - print("filename" + repr(f) + " = " + repr(fileArray[f])) - try: - DATA[f] = loadtxt(fileArray[f], unpack=True) - print("loaded data!") - print("data size - " + repr(DATA[f].size)) - except OSError: - print("Error: There is no file called", fileArray[f]) - exit() - except ValueError: - print( - "Error: In", - fileArray[f], - "all comment rows must\ - start with a #! Columns can only contain floats!", - ) - exit() - if DATA[f].size == 0: - print("Error: There is no data in", fileArray[f]) - exit() - - for i in range(0, list_len): - try: - DATA_PROFILES[f][i] = DATA[f][i] - useProfile[i] = True - except IndexError: - print( - "Warning: The column for the " - + repr(LIST_PROFILES[i]) - + " in file " - + repr(f) - + " does not exist!\ - Remember to start counting at 0!" - ) - print(useProfile) - - if ARGS.mfile is not None: - get_mfileData() - - # Make plots - i = 1 # i.e. don't plot rho against itself - page = [None] * NUM_PAGES - for p in range(0, NUM_PAGES): - page[p] = plt.figure(figsize=(12, 9), dpi=80) - plt.suptitle(PAGE_TITLES[p]) - count = 0 - while i < list_len: - - # plot radial profiles from column format - plotProfile(n, i, p, count) - - i = i + 1 - count = count + 1 - # to determine plot position on page - if count == PLOTS_PAGE: - break - - with bpdf.PdfPages("radial_profiles.pdf") as pdf: - pdf.savefig(page[0]) - pdf.savefig(page[1]) - pdf.savefig(page[2]) - pdf.savefig(page[3]) - plt.show(page[0]) diff --git a/utilities/convert_in_dat.py b/utilities/convert_in_dat.py deleted file mode 100755 index 0773f579..00000000 --- a/utilities/convert_in_dat.py +++ /dev/null @@ -1,47 +0,0 @@ -""" - - Script to convert IN.DAT into new format - - James Morris - CCFE - 19/11/15 - -""" - -# Third-party libraries -import argparse - -# Local libraries -import process.io.in_dat as indat - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser( - description="PROCESS IN.DAT converter. 
" - "Convert IN.DAT into new " - "format.\n" - "For info contact " - "james.morris2@ccfe.ac.uk" - ) - - parser.add_argument( - "-f", - metavar="f", - type=str, - default="IN.DAT", - help='File to read as IN.DAT (default="IN.DAT")', - ) - - parser.add_argument( - "-o", - metavar="o", - type=str, - default="new_IN.DAT", - help='File to read as IN.DAT (default="new_IN.DAT")', - ) - - args = parser.parse_args() - - i = indat.InDat(filename=args.f) - i.write_in_dat(output_filename=args.o) diff --git a/utilities/cost_bar.py b/utilities/costs_bar.py old mode 100755 new mode 100644 similarity index 99% rename from utilities/cost_bar.py rename to utilities/costs_bar.py index a3f1dd9a..ad594249 --- a/utilities/cost_bar.py +++ b/utilities/costs_bar.py @@ -342,7 +342,6 @@ def comp_step(): # Main code if __name__ == "__main__": - # Setup command line arguments parser = argparse.ArgumentParser( description="Displays the cost breakdown as a bar chart. " diff --git a/utilities/cost_pie.py b/utilities/costs_pie.py old mode 100755 new mode 100644 similarity index 99% rename from utilities/cost_pie.py rename to utilities/costs_pie.py index 4285c89b..239e776b --- a/utilities/cost_pie.py +++ b/utilities/costs_pie.py @@ -288,7 +288,6 @@ def step_cost_model(): # Main code if __name__ == "__main__": - # Setup command line arguments parser = argparse.ArgumentParser( description="Displays the cost breakdown as a pie chart. " diff --git a/utilities/create_csv4database.py b/utilities/create_csv4database.py deleted file mode 100755 index d497fe5f..00000000 --- a/utilities/create_csv4database.py +++ /dev/null @@ -1,335 +0,0 @@ -#! /usr/bin/env python -""" -Utility to create csv-file for PROCESS runs database. -THIS SHOULD NOT BE EDITED WITHOUT PRIOR CONSULTATION OF FRANCESCO MAVIGLIA -WHO MAINTAINS THE PRCOESS RUNS DATABASE!!!! 
- -Input: MFILE.DAT -Output: process_summary.txt -Optional arguments: - # change the input file name - create_csv4database.py -f MFILE.DAT - # change the output file name - create_csv4database.py -o out.txt - -Based on plot_proc.py by James Morris -Initial version output_data.py by Richard Kemp, 08/10/2014 -16/12/16 HL: separated utility to conserve format for PROCESS runs database -""" - - -import argparse -import process.io.mfile as mf -from dict_tools import proc_dict - - -def write_data(data, mfile_data, outfile, scan): - """Function to write output to file. - - Arguments: - data --> list of data to write - m_file_data --> MFILE.DAT data to read - outfile --> output file reference - scan --> scan to read from MFILE.DAT - - """ - for i in range(len(data)): - if isinstance(data[i][0], str): - if mfile_data.data[data[i][0]].exists: - dat = mfile_data.data[data[i][0]].get_scan(scan) - if isinstance(dat, str): - value = dat - else: - value = "%.4g" % mfile_data.data[data[i][0]].get_scan(scan) - if "alpha" in data[i][0]: - value = str(float(value) + 1.0) - if isinstance(dat, str): - outfile.write('"' + data[i][1] + '", "' + value + '"\n') - else: - outfile.write('"' + data[i][1] + '", ' + value + "\n") - else: - mfile_data.data[data[i][0]].get_scan(-1) - # Write the data label even if the data is not available. - outfile.write('"' + data[i][1] + '", ' + "value missing!" + "\n") - # outfile.write("value missing!\n") - else: - dat = data[i][0] - if isinstance(dat, str): - value = dat - else: - value = "%.4g" % data[i][0] - outfile.write('"' + data[i][1] + '", ' + value + "\n") - - -def main(mfile_data, output_file, scan=-1): - """Function to output summary data for insertion into spreadsheet. 
- - Arguments: - m_file_data --> MFILE.DAT data to read - output_file --> file to write to - scan --> scan to read from MFILE.DAT - - """ - - general = [ - ("procver", "PROCESS_Version"), - ("date", "Date:"), - ("time", "Time:"), - ("username", "User:"), - ] - - in_blanket_thk = mfile_data.data["shldith"].get_scan(scan) + mfile_data.data[ - "blnkith" - ].get_scan(scan) - out_blanket_thk = mfile_data.data["shldoth"].get_scan(scan) + mfile_data.data[ - "blnkoth" - ].get_scan(scan) - - geometry = [ - ("rmajor", "R_0"), - ("rminor", "a"), - ("aspect", "A"), - ("kappa95", "kappa_95"), - ("triang95", "delta_95"), - ("sarea", "Surface area"), - ("vol", "Plasma volume"), - ("n_tf", "No. of TF coils"), - (in_blanket_thk, "i/b blkt/shld"), - (out_blanket_thk, "o/b blkt/shld"), - ("powfmw", "Fusion power"), - ] - - nong = mfile_data.data["dnla"].get_scan(scan) / mfile_data.data[ - "dlimit(7)" - ].get_scan(scan) - dnz = mfile_data.data["dnz"].get_scan(scan) / mfile_data.data["dene"].get_scan(scan) - tepeak = mfile_data.data["te0"].get_scan(scan) / mfile_data.data["te"].get_scan( - scan - ) - - nepeak = mfile_data.data["ne0"].get_scan(scan) / mfile_data.data["dene"].get_scan( - scan - ) - - physics = [ - ("plascur/1d6", "I_p"), - ("bt", "Vacuum B_T"), - ("q95", "q95"), - ("normalised thermal beta", "beta_N, thermal"), - ("normalised total beta", "beta_N, total"), - ("thermal poloidal beta", "beta_P, thermal"), - ("betap", "beta_P, total"), - ("te", ""), - ("dene", ""), - (nong, "/n_G"), - (tepeak, "T_e0/"), - (nepeak, "n_e0/"), - ("zeff", "Z_eff"), - ("zeffso", "Z_eff, SoL"), - (dnz, "n_Z/n_e"), - ("taueff", "tau_e"), - ("hfact", "H-factor"), - ("tauelaw", "Scaling law"), - ] - - # dnla = mfile_data.data["dnla"].get_scan(scan)/1.0e20 - # bt = mfile_data.data["bt"].get_scan(scan) - # surf = mfile_data.data["sarea"].get_scan(scan) - pthresh = mfile_data.data["pthrmw(6)"].get_scan(scan) - # err = pthresh - mfile_data.data["pthrmw(7)"].get_scan(scan) - gross_eff = 100.0 * ( - 
mfile_data.data["pgrossmw"].get_scan(scan) - / mfile_data.data["pthermmw"].get_scan(scan) - ) - net_eff = 100.0 * ( - ( - mfile_data.data["pgrossmw"].get_scan(scan) - - mfile_data.data["htpmw"].get_scan(scan) - ) - / ( - mfile_data.data["pthermmw"].get_scan(scan) - - mfile_data.data["htpmw"].get_scan(scan) - ) - ) - plant_eff = 100.0 * ( - mfile_data.data["pnetelmw"].get_scan(scan) - / mfile_data.data["powfmw"].get_scan(scan) - ) - - power_flows = [ - ("wallmw", "Av. neutron wall load"), - ("pinnerzoneradmw", "inner zone radiation"), - ("psyncpv*vol", "Synchrotron radiation"), - ("pouterzoneradmw", "outer zone radiation"), - ("pnucblkt", "Nuclear heating in blanket"), - ("pnucshld", "Nuclear heating in shield"), - ("pdivt", "Psep / Pdiv"), - (pthresh, "H-mode threshold (M=2.5)"), - ("fwbllife", "FW/Blanket life"), - ("divlife", "Divertor life"), - ("pthermmw", "Thermal Power"), - (gross_eff, "Gross cycle efficiency"), - (net_eff, "Net cycle efficiency"), - ("pgrossmw", "Gross electric power"), - ("pnetelmw", "Net electric power"), - (plant_eff, "Plant efficiency Pe/Pfus"), - ] - - pinjie = mfile_data.data["pinjmw"].get_scan(scan) - pdivt = mfile_data.data["pdivt"].get_scan(scan) - pdivr = pdivt / mfile_data.data["rmajor"].get_scan(scan) - pdivnr = ( - 10.0e20 - * mfile_data.data["pdivt"].get_scan(scan) - / ( - mfile_data.data["rmajor"].get_scan(scan) - * mfile_data.data["dene"].get_scan(scan) - ) - ) - # dnla = mfile_data.data["dnla"].get_scan(scan)/1.0e20 - # bt = mfile_data.data["bt"].get_scan(scan) - # surf = mfile_data.data["sarea"].get_scan(scan) - pthresh = mfile_data.data["pthrmw(6)"].get_scan(scan) - flh = pdivt / pthresh - powerht = mfile_data.data["powerht"].get_scan(scan) - psync = mfile_data.data["psyncpv*vol"].get_scan(scan) - pbrem = mfile_data.data["pinnerzoneradmw"].get_scan(scan) - hfact = mfile_data.data["hfact"].get_scan(scan) - hstar = hfact * (powerht / (powerht + psync + pbrem)) ** 0.31 - - current_drive = [ - (pinjie, "SS auxiliary power"), - 
("pheat", "Power for heating only"), - ("bootipf", "Bootstrap fraction"), - ("faccd", "Auxiliary fraction"), - ("facoh", "Ohmic fraction"), - ("gamnb", "NB gamma"), - ("enbeam", "NB energy"), - ("powerht", "Assumed heating power"), - (pdivr, "Pdiv/R"), - (pdivnr, "Pdiv/(n R)"), - (flh, "Pdiv/PLH"), - (hstar, "H* (non-rad. corr.)"), - ] - - # Number of coils (1 is OH coil) - number_of_coils = 0 - for item in mfile_data.data.keys(): - if "rpf(" in item: - number_of_coils += 1 - pf_info = [] - for i in range(1, number_of_coils): - if i % 2 != 0: - pf_info.append( - ( - mfile_data.data["ric(%s)" % str(i).zfill(2)].get_scan(scan), - "PF %s" % str(i), - ) - ) - tburn = mfile_data.data["tburn"].get_scan(scan) / 3600.0 - # tftype = proc_dict.DICT_TF_TYPE[mfile_data.data["i_tf_sc_mat"].get_scan(scan)] - vssoft = mfile_data.data["vsres"].get_scan(scan) + mfile_data.data[ - "vsind" - ].get_scan(scan) - - coil_currents = [ - (pf_info[0][0], pf_info[0][1]), - (pf_info[1][0], pf_info[1][1]), - (pf_info[2][0], pf_info[2][1]), - (vssoft, "Startup flux swing"), - ("vstot", "Available flux swing"), - (tburn, "Burn time"), - ("bmaxtf", "Peak field at conductor"), - ("iooic", r"I/I$_{\mathrm{crit}}$"), - ("tmarg", "Temperature margin"), - ("sig_tf_case", "TF case maximum shear stress (Tresca criterion)"), - ("sig_tf_wp", "TF conduit maximum shear stress (Tresca criterion)"), - ( - "sig_tf_case_max", - "Allowable maximum shear stress in the TF case (Tresca criterion)", - ), - ( - "sig_tf_wp_max", - "Allowable maximum shear stress in the TF conduit (Tresca criterion)", - ), - ] - - costs = [ - ("coe", "Cost of electricity"), - ("concost", "Constructed cost"), - ("capcost", "Total capex"), - ] - - # open file for writing - ofile = open(output_file, "w") - - opt = proc_dict.DICT_OPTIMISATION_VARS[ - abs(int(mfile_data.data["minmax"].get_scan(scan))) - ] - ofile.write('"GENERAL"\n') - write_data(general, mfile_data, ofile, scan) - ofile.write('"optimising:", "' + opt + '"\n') - 
ofile.write("\n") - - ofile.write('"GEOMETRY"\n') - write_data(geometry, mfile_data, ofile, scan) - ofile.write("\n") - - ofile.write('"PHYSICS"\n') - write_data(physics, mfile_data, ofile, scan) - ofile.write("\n") - - ofile.write('"POWER_FLOWS"\n') - write_data(power_flows, mfile_data, ofile, scan) - ofile.write("\n") - - ofile.write('"CURRENT_DRIVE"\n') - write_data(current_drive, mfile_data, ofile, scan) - ofile.write("\n") - - ofile.write('"COIL_CURRENTS"\n') - write_data(coil_currents, mfile_data, ofile, scan) - ofile.write("\n") - - ofile.write('"COSTS"\n') - write_data(costs, mfile_data, ofile, scan) - ofile.write("\n") - - # close file - ofile.close() - - -if __name__ == "__main__": - - # Setup command line arguments - PARSER = argparse.ArgumentParser( - description="Produce a single-column " - "comma-separated (.txt) summary. " - "For info contact " - "rich.kemp@ukaea.ac.uk or " - "james.morris2@ukaea.ac.uk" - ) - - PARSER.add_argument( - "-f", - metavar="FILENAME", - type=str, - default="MFILE.DAT", - help="specify input filename", - ) - - PARSER.add_argument( - "-o", - metavar="OUTPUT", - type=str, - default="process_summary.txt", - help="specify output filename", - ) - - ARGS = PARSER.parse_args() - - # read MFILE - MFILE = mf.MFile(ARGS.f) - - # run main - main(MFILE, ARGS.o) diff --git a/utilities/diagnose_process.py b/utilities/diagnose_process.py deleted file mode 100755 index ec799935..00000000 --- a/utilities/diagnose_process.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python - -""" -Code to produce several plots to aid the diagnosis of a PROCESS run. - -Author: H. 
Lux (Hanni.Lux@ccfe.ac.uk) - -Input files: -MFILE.DAT - -Output files: - - -Notes: -19/08/2014 HL initial version of this program - - -Compatible with PROCESS version 319 -""" - -####################### -# imported libraries - -import argparse -from process_io_lib.diagnose_funcs import plot_normalised_ixc, plot_normalised_icc_res -from pylab import show - - -if __name__ == "__main__": - ############################################################ - # Usage - - PARSER = argparse.ArgumentParser( - description="Program to diganose\ - a PROCESS run." - ) - - PARSER.add_argument( - "-f", "--mfile", default="MFILE.DAT", help=" mfile, default = MFILE.DAT" - ) - - ARGS = PARSER.parse_args() - - ############################################################ - # main program - - plot_normalised_ixc(ARGS.mfile) - - plot_normalised_icc_res(ARGS.mfile) - - show() diff --git a/utilities/diagnose_uncertainties.py b/utilities/diagnose_uncertainties.py deleted file mode 100755 index 2291e56c..00000000 --- a/utilities/diagnose_uncertainties.py +++ /dev/null @@ -1,204 +0,0 @@ -#!/usr/bin/env python -""" -Code to display the final uncertain input parameter distributions in comparison -to the specified input distributions - -Author: H. 
Lux (Hanni.Lux@ccfe.ac.uk) - -Input file: -uncertainties.nc -evaluate_uncertainties.json - -""" - - -####################### -# imported libraries - - -from matplotlib import rc -import argparse -from scipy.stats import norm -from numpy import linspace, argwhere, logical_or, zeros, mean -from matplotlib.ticker import NullFormatter -from pylab import figure, hist, show, savefig, gca, plot, xlabel -from sys import stderr -from process_io_lib.process_config import UncertaintiesConfig -from process_io_lib.process_dicts import DICT_INPUT_BOUNDS - -rc("font", size=25) -rc("lines", lw=1.5) # is overwritten by setting coulours/linestylies -rc(("xtick", "ytick"), labelsize=20) -rc("figure", figsize=(8, 6)) -rc("figure.subplot", bottom=0.18) # was 0.12 -rc("figure.subplot", left=0.19) -rc("figure.subplot", right=0.81) - - -def plot_distribution(xarr, labelx, unc_dict): - - """function to create a 2d scatter plot with histograms""" - - figure() - - # no labels - gca().yaxis.set_major_formatter(NullFormatter()) - xlabel(labelx) - - (n_arr, bins, patches) = hist(xarr, bins=10, normed=True) - - if unc_dict["errortype"].lower() == "gaussian": - u_mean = unc_dict["mean"] - u_std = unc_dict["std"] - xvalues = linspace(min(xarr), max(xarr), 500) - yvalues = norm.pdf(xvalues, u_mean, u_std) - # assures values are inside input bounds! 
- if varname in DICT_INPUT_BOUNDS: - args = argwhere( - logical_or( - xvalues < DICT_INPUT_BOUNDS[varname]["lb"], - xvalues > DICT_INPUT_BOUNDS[varname]["ub"], - ) - ) - yvalues[args] = zeros(args.shape) - - else: # cutoff at 0 - typically negative values are meaningless - args = argwhere(xvalues < 0.0) - yvalues[args] = zeros(args.shape) - - elif unc_dict["errortype"].lower() == "uniform": - lbound = unc_dict["lowerbound"] - ubound = unc_dict["upperbound"] - xvalues = linspace(lbound, ubound, 10) - yvalues = [mean(n_arr)] * 10 - - elif unc_dict["errortype"].lower() == "relative": - err = unc_dict["percentage"] / 100.0 - lbound = unc_dict["mean"] * (1.0 - err) - ubound = unc_dict["mean"] * (1.0 + err) - xvalues = linspace(lbound, ubound, 10) - yvalues = [mean(n_arr)] * 10 - - elif unc_dict["errortype"].lower() == "lowerhalfgaussian": - u_mean = unc_dict["mean"] - u_std = unc_dict["std"] - xvalues = linspace(min(xarr), max(xarr), 500) - yvalues = norm.pdf(xvalues, u_mean, u_std) - # to correct normalisation for half Gaussian - yvalues = yvalues * 2.0 - if varname in DICT_INPUT_BOUNDS: - args = argwhere( - logical_or(xvalues < DICT_INPUT_BOUNDS[varname]["lb"], xvalues > u_mean) - ) - yvalues[args] = zeros(args.shape) - - else: - args = argwhere(logical_or(xvalues < 0.0, xvalues > u_mean)) - yvalues[args] = zeros(args.shape) - - elif unc_dict["errortype"].lower() == "upperhalfgaussian": - u_mean = unc_dict["mean"] - u_std = unc_dict["std"] - xvalues = linspace(min(xarr), max(xarr), 500) - yvalues = norm.pdf(xvalues, u_mean, u_std) - # to correct normalisation for half Gaussian - yvalues = yvalues * 2.0 - if varname in DICT_INPUT_BOUNDS: - args = argwhere( - logical_or(xvalues < u_mean, xvalues > DICT_INPUT_BOUNDS[varname]["ub"]) - ) - yvalues[args] = zeros(args.shape) - else: - args = argwhere(xvalues < u_mean) - yvalues[args] = zeros(args.shape) - - plot(xvalues, yvalues, "k-") - - -if __name__ == "__main__": - - 
########################################################### - # Usage - - PARSER = argparse.ArgumentParser( - description="Program to check\ - the final uncertainty distributions in the input parameters." - ) - - PARSER.add_argument( - "-e", - "--end", - default="pdf", - help="file format default =\ -pdf", - ) - - PARSER.add_argument( - "-u", - "--uncertainties", - default="evaluate_uncertainties.json", - help="uncertainties config file \ -default = evaluate_uncertainties.json", - ) - - PARSER.add_argument( - "-f", - "--filename", - default="uncertainties.nc", - help="uncertainties data file, default =\ -uncertainties.nc", - ) - ARGS = PARSER.parse_args() - - FILENAME = ARGS.filename - - ############################################################ - # main program - - from process_io_lib.process_netcdf import NetCDFReader - - CONFIG = UncertaintiesConfig(ARGS.uncertainties) - - if CONFIG.uncertainties != []: - - with NetCDFReader(FILENAME) as ncdf_reader: - - for u_dict in CONFIG.uncertainties: - varname = u_dict["varname"].lower() - - XARR = [] - for datadict in ncdf_reader.run_dicts(start_run=0): - try: - XARR += [datadict[varname]] - except KeyError: - if varname == "fimp(14)": - try: - XARR += [datadict["fimp(14"]] - except KeyError: - print( - "Error: Variable", - varname, - "can currently not be treated!\n", - "Check separately! \n", - list(datadict.keys()), - file=stderr, - ) - break - else: - print( - "Error: Variable", - varname, - "can currently not be treated!\n", - "Check separately! \n", - list(datadict.keys()), - file=stderr, - ) - break - - if XARR != []: - plot_distribution(XARR, varname, u_dict) - savefig("Uncertainties_Diagnostic_" + varname + "." 
+ ARGS.end) - print("Number of successful runs", len(XARR)) - - -show() diff --git a/utilities/dict_tools.py b/utilities/dict_tools.py deleted file mode 100644 index 6a9fcdfd..00000000 --- a/utilities/dict_tools.py +++ /dev/null @@ -1,16 +0,0 @@ -import importlib.util -import pathlib - -CURRENT_DIR = pathlib.Path(__file__).resolve().parent - - -def module_from_file(module_name, file_path): - spec = importlib.util.spec_from_file_location(module_name, file_path) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module - - -proc_dict = module_from_file( - "create_dicts_config", CURRENT_DIR.parent / "scripts/create_dicts_config.py" -) diff --git a/utilities/fit_profile.py b/utilities/fit_profile.py deleted file mode 100755 index 723cad29..00000000 --- a/utilities/fit_profile.py +++ /dev/null @@ -1,275 +0,0 @@ -#!/usr/bin/env python -""" -Python Tool to fit a general temperature or density profile from an ascii table -using our pedestal parametrisation - -Author: H. Lux (Hanni.Lux@ccfe.ac.uk) - -Input file: -profile.txt (can be either density or temperature profile) -(should contain data in columns, comment lines start with #) - -Compatible with PROCESS version 379 -""" - -####################### -# imported libraries - -import argparse -from numpy import loadtxt, array, argsort, arange, argmin, linspace -from scipy.optimize import leastsq -from process_io_lib.profile_funcs import nresidual, tresidual, nprofile, tprofile -from pylab import figure, xlabel, ylabel, axvline, axhline, plot, show - -if __name__ == "__main__": - ############################################################ - # Usage - - PARSER = argparse.ArgumentParser( - description="Program to fit a\ - general temperature or density profile from an ascii table." 
- ) - - PARSER.add_argument( - "-f", - "--filename", - default="profile.txt", - help="ascii file containing data in columns,\ - default = profile.txt", - ) - - PARSER.add_argument( - "-r", - "--rho", - type=int, - default=0, - help="column of the normalised radius rho=r/a,\ - default = 0", - ) - - PARSER.add_argument( - "-n", - "--density", - type=int, - default=1, - help="column of the density profile,\ - default = 1", - ) - - PARSER.add_argument( - "-t", - "--temperature", - type=int, - default=2, - help="column of the temperature profile,\ - default = 2", - ) - - PARSER.add_argument( - "-rn", - "--rhopedn", - type=float, - default=0.9, - help="user defined initial guess of the density\ - pedestal position, if outside [0,1] starts at 0.9, default = 0.9", - ) - - PARSER.add_argument( - "-rt", - "--rhopedt", - type=float, - default=0.9, - help="user defined initial guess of the temperature\ - pedestal position, if outside [0,1], starts at 0.9, default = 0.9", - ) - - ARGS = PARSER.parse_args() - - USEDENSITY = True - USETEMPERATURE = True - - ##################################### - # input error handling - - try: - DATA = loadtxt(ARGS.filename, unpack=True) - except FileNotFoundError: - print("Error: There is no file called", ARGS.filename) - exit() - except ValueError: - print( - "Error: In", - ARGS.filename, - "all comment rows must\ - start with a #! Columns can only contain floats!", - ) - exit() - - if DATA.size == 0: - print("Error: There is no data in", ARGS.filename) - exit() - - try: - RHO = DATA[ARGS.rho] - except IndexError: - print( - "Error: The column for the normalised radius does not exist!\ - Remember to start counting at 0!" - ) - exit() - - try: - DEN = DATA[ARGS.density] - except IndexError: - print( - "Warning: The column for the density does not exist!\ - Remember to start counting at 0!" 
- ) - USEDENSITY = False - - try: - TEMP = DATA[ARGS.temperature] - except IndexError: - print( - "Warning: The column for the temperature does not exist!\ - Remember to start counting at 0!" - ) - USETEMPERATURE = False - - ################################################### - # assure sorted data - SORTED_INDS = argsort(RHO) - - if all(SORTED_INDS != arange(len(RHO))): - RHO = RHO[SORTED_INDS] - - if USEDENSITY: - DEN = DEN[SORTED_INDS] - - if USETEMPERATURE: - TEMP = TEMP[SORTED_INDS] - - # only use data between 0 and 1! - # TODO: Does not work, if values outside range are closer! - # However, this is better, for getting most accurate values at rho = 0,1! - INDZERO = argmin(abs(RHO)) - INDONE = argmin(abs(RHO - 1.0)) - RHO = RHO[INDZERO : INDONE + 1] - - if USEDENSITY: - DEN = DEN[INDZERO : INDONE + 1] - - if USETEMPERATURE: - TEMP = TEMP[INDZERO : INDONE + 1] - - ################################################### - - if USEDENSITY: - - DICTFIT = {"alphan": 2.0} - - if ARGS.rhopedn >= 0.0 and ARGS.rhopedn <= 1.0: - DICTFIT["rhopedn"] = ARGS.rhopedn - else: - DICTFIT["rhopedn"] = 0.9 - DICTFIT["n0"] = DEN[0] - DICTFIT["nsep"] = DEN[-1] - INDPED = argmin(abs(RHO - DICTFIT["rhopedn"])) - DICTFIT["nped"] = DEN[INDPED] - - LSQFIT = leastsq( - nresidual, - array([DICTFIT["alphan"], DICTFIT["rhopedn"]]), - args=(RHO, DEN, DICTFIT), - ) - if LSQFIT[-1] not in [1, 2, 3, 4]: - print("Error: Density fit has failed!") - print("leastsq return:", LSQFIT) - - else: - print("\nDensity profile parameters:") - print("----------------------------") - print("Density pedestal rhopedn:", LSQFIT[0][1]) - print("Core density n0:", DICTFIT["n0"]) - print("Pedestal density nped:", DICTFIT["nped"]) - print("Density at separatrix nsep:", DICTFIT["nsep"]) - print("Density peaking parameter alphan:", LSQFIT[0][0], "\n") - - RHOARR = linspace(0, 1, 100) - RHOPEDN = LSQFIT[0][1] - NCORE = DICTFIT["n0"] - NPED = DICTFIT["nped"] - NSEP = DICTFIT["nsep"] - ALPHAN = LSQFIT[0][0] - - figure() - 
xlabel(r"$\rho$") - ylabel(r"$n$") - axvline(RHOPEDN, label=r"$\rho_{ped}$") - axhline(NCORE, ls="--") - axhline(NPED, ls=":") - axhline(NSEP) - plot( - RHOARR, - [nprofile(x, RHOPEDN, NCORE, NPED, NSEP, ALPHAN) for x in RHOARR], - ) - plot(RHO, DEN, "kx") - - if USETEMPERATURE: - DICTFIT = {"alphat": 2.0, "betat": 2.0} - - if ARGS.rhopedt >= 0.0 and ARGS.rhopedt <= 1.0: - DICTFIT["rhopedt"] = ARGS.rhopedt - else: - DICTFIT["rhopedt"] = 0.9 - DICTFIT["t0"] = TEMP[0] - DICTFIT["tsep"] = TEMP[-1] - INDPED = argmin(abs(RHO - DICTFIT["rhopedt"])) - DICTFIT["tped"] = TEMP[INDPED] - - LSQFIT = leastsq( - tresidual, - array([DICTFIT["alphat"], DICTFIT["betat"], DICTFIT["rhopedt"]]), - args=(RHO, TEMP, DICTFIT), - ) - - if LSQFIT[-1] not in [1, 2, 3, 4]: - print("Error: Temperature fit has failed!") - print("leastsq return:", LSQFIT) - - else: - print("\nTemperature profile parameters:") - print("----------------------------") - print("Temperature pedestal rhopedt:", LSQFIT[0][2]) - print("Core temperature t0:", DICTFIT["t0"]) - print("Pedestal temperature tped:", DICTFIT["tped"]) - print("Temperature at separatrix tsep:", DICTFIT["tsep"]) - print("Temperature peaking parameter alphat:", LSQFIT[0][0]) - print("Second temperature exponent betat:", LSQFIT[0][1], "\n") - - RHOARR = linspace(0, 1, 100) - RHOPEDT = LSQFIT[0][2] - TCORE = DICTFIT["t0"] - TPED = DICTFIT["tped"] - TSEP = DICTFIT["tsep"] - ALPHAT = LSQFIT[0][0] - BETAT = LSQFIT[0][1] - - figure() - xlabel(r"$\rho$") - ylabel(r"$T$") - axvline(RHOPEDT, label=r"$\rho_{ped}$") - axhline(TCORE, ls="--") - axhline(TPED, ls=":") - axhline(TSEP) - plot( - RHOARR, - [ - tprofile(x, RHOPEDT, TCORE, TPED, TSEP, ALPHAT, BETAT) - for x in RHOARR - ], - ) - plot(RHO, TEMP, "kx") - - -show() diff --git a/utilities/in_dat_comparison.py b/utilities/in_dat_comparison.py deleted file mode 100755 index e1cfd13a..00000000 --- a/utilities/in_dat_comparison.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python -""" - PROCESS IN.DAT 
comparison tool - - Tool to compare two IN.DAT files and list which variable are in one and not - another and also compare parameters with different values. Finally list parameters - that are in both and the same - - James Morris 15/11/2016 - CCFE -""" - -# Third party libraries -import sys -import argparse - -# PROCESS libraries -from process.io.in_dat import InDat - -# Constants - -# *********************************** # - - -def compare_input_files(f1, f2): - """Compare - - :param f1: file 1 data object - :param f2: file 2 data object - """ - - file_1_keys = f1.data.keys() - file_2_keys = f2.data.keys() - - both = set(file_1_keys).intersection(file_2_keys) - both_same = [key for key in both if f1.data[key].value == f2.data[key].value] - both_diff = set(both).difference(both_same) - - one_not_two = set(file_1_keys).difference(file_2_keys) - two_not_one = set(file_2_keys).difference(file_1_keys) - - return both, both_same, both_diff, one_not_two, two_not_one - - -def output(kb, kbs, kbd, k1n2, k2n1, f1, f2, c_args): - """Output the differences between to the two input files to either - terminal or input_comp.txt - - :param kb: keys in both input files - :param kbs: keys in both input files with same value - :param kbd: keys in both input files with different value - :param k1n2: keys in input file 1 but not file 2 - :param k2n1: keys in input file 2 but not file 1 - :param f1: input file 1 - :param f2: input file 2 - :param c_args: command arguments - """ - - if c_args.save: - location = open("input_comp.txt", "w") - else: - location = sys.stdout - - print("\nPROCESS Input file comparison tool\n", file=location) - - print( - "\nThere are {0} variables in {1} NOT IN {2}:\n".format( - len(k1n2), c_args.f[0], c_args.f[1] - ), - file=location, - ) - print("\n".join("{0} : {1}".format(*k) for k in enumerate(k1n2)), file=location) - - print( - "\nThere are {0} variables in {1} NOT IN {2}:\n".format( - len(k2n1), c_args.f[1], c_args.f[0] - ), - file=location, - ) - 
print("\n".join("{0} : {1}".format(*k) for k in enumerate(k2n1)), file=location) - - print( - "\nThere are {0} variables in BOTH {1} AND {2} with the DIFFERENT values:\n".format( - len(kbd), c_args.f[0], c_args.f[1] - ), - file=location, - ) - print( - "\n".join( - "{0} : {1} \n\t {2} = {3} \n\t {4} = {5}\n".format( - k[0], - k[1], - c_args.f[0], - f1.data[k[1]].value, - c_args.f[1], - f2.data[k[1]].value, - ) - for k in enumerate(kbd) - ), - file=location, - ) - - print( - "\nThere are {0} variables in BOTH {1} AND {2} with the SAME values:\n".format( - len(kbs), c_args.f[0], c_args.f[1] - ), - file=location, - ) - print( - "\n".join( - "{0} : {1} \n\t {2} = {3} \n\t {4} = {5}\n".format( - k[0], - k[1], - c_args.f[0], - f1.data[k[1]].value, - c_args.f[1], - f2.data[k[1]].value, - ) - for k in enumerate(kbs) - ), - file=location, - ) - - -def main(cmd_args): - """Main - - :param cmd_args: command line arguments - """ - - # Get input file names - file_names = cmd_args.f - - # Read input files - file_1 = InDat(filename=file_names[0]) - file_2 = InDat(filename=file_names[1]) - - keys_both, keys_both_same, keys_both_diff, keys_1n2, keys_2n1 = compare_input_files( - file_1, file_2 - ) - - output( - keys_both, - keys_both_same, - keys_both_diff, - keys_1n2, - keys_2n1, - file_1, - file_2, - cmd_args, - ) - - return - - -if __name__ == "__main__": - - PARSER = argparse.ArgumentParser( - description="Produce a comparison " - "between two PROCESS " - "input files." 
- "For info contact " - "james.morris2@ukaea.uk" - ) - - PARSER.add_argument("-f", metavar="f", type=str, nargs="+", help="Files to compare") - - PARSER.add_argument( - "-s", - "--save", - help="Save output to file called input_comp.txt", - action="store_true", - ) - - ARGS = PARSER.parse_args() - - main(ARGS) diff --git a/utilities/kallenbach.py b/utilities/kallenbach.py deleted file mode 100644 index 1df67b87..00000000 --- a/utilities/kallenbach.py +++ /dev/null @@ -1,118 +0,0 @@ -"""Contains some methods from the now-removed kallenbach_module.f90 file. - -Running this file will run the kallenbach_paper routine, which should probably -become a test at some point. -""" - -import logging -from pathlib import Path -import numpy - -from process.fortran import constants -from process.fortran import physics_variables -from process.fortran import div_kal_vars -from process.fortran import impurity_radiation_module -from process.fortran import divertor_ode - -from process.fortran import read_and_get_atomic_data -from process.fortran import plot_radiation - -from process.fortran import global_variables -from process.utilities import f2py_string_patch -from process.main import SingleRun - -logger = logging.getLogger(__name__) -# Logging handler for console output -s_handler = logging.StreamHandler() -s_handler.setLevel(logging.INFO) -logger.addHandler(s_handler) - -CURRENT_PATH = Path(__file__).resolve().parent - - -def kallenbach_paper(): - # TODO: this should probably be turned into a pytest at some point - - SingleRun.init_module_vars() - SingleRun.initialise() - - rmajor = 8.0e0 - rminor = 2.75e0 - bt = 4.00972e0 * (rmajor + rminor) / rmajor - plascur = 1.33542e0 * (2.0e0 * numpy.pi * rminor) / constants.rmu0 - q = 3.0e0 - t_target = 2.3e0 - q_target_total = 4.175e6 - target_angle = 30.0e0 - b_pol = 0.956e0 - physics_variables.tesep = 0.298 - div_kal_vars.target_spread = 7.0e-3 - div_kal_vars.netau_sol = 0.5e0 - div_kal_vars.lambda_q_omp = 0.002e0 - - # MDK Issue #494. 
- # Invert the equation for lcon to ensure lcon=100 for this test. - # lcon = lcon_factor * 0.395d0*pi*q*rmajor/lambda_omp**0.196 - div_kal_vars.lcon_factor = 100.0e0 / ( - 0.395e0 * numpy.pi * 3.0e0 * 8.0e0 / 0.002e0**0.196 - ) - - global_variables.fileprefix = f2py_string_patch.string_to_f2py_compatible( - global_variables.fileprefix, str(CURRENT_PATH) - ) - - logger.info( - f"""Divertor: Kallenbach 1D Model - - Major radius [m]: {rmajor} - Minor radius [m]: {rminor} - Toroidal field [T]: {bt} - Plasma current [A]: {plascur} - q95 [A]: {q} - """ - ) - - for i in range(1, impurity_radiation_module.nimp): - impurity_radiation_module.impurity_arr_frac[i] = 0 - - # Set the impurity array fraction of Nitrogen - # gives 0.04 in SOL, as in Kallenbach paper - impurity_radiation_module.impurity_arr_frac[4] = 8.0e-3 - - divertor_ode.divertor_kallenbach( - rmajor=rmajor, - rminor=rminor, - bt=bt, - plascur=plascur, - q=q, - verboseset=False, - ttarget=t_target, - qtargettotal=q_target_total, - targetangle=target_angle, - unit_test=False, - bp=b_pol, - outfile=constants.nout, - iprint=1, - ) - - read_and_get_atomic_data.plot_rates() - logger.info( - """Rate coefficients for deuterium - saved in "rate_coefficients.txt" - Compare to Figure 2 in Kallenbach 2016""" - ) - - plot_radiation.plot_lz() - logger.info( - """Radiative loss functions - saved in "radiative_loss_functions.txt" - Compare to Figure 3 in Kallenbach 2016.""" - ) - - plot_radiation.plot_z() - logger.info( - """Reads mean Z and mean Z^2 - saved in "mean_Z.tx" - Compare to plots such as He_z.ps etc in /home/mkovari/sol/kallenbach/divertor_ode/LZ_NON_CORONA.""" - ) - - -if __name__ == "__main__": - kallenbach_paper() diff --git a/utilities/kallenbach_plotting.py b/utilities/kallenbach_plotting.py deleted file mode 100755 index 61bbdf3a..00000000 --- a/utilities/kallenbach_plotting.py +++ /dev/null @@ -1,406 +0,0 @@ -#!/usr/bin/env python - -import argparse -import numpy as np -import matplotlib.pyplot as plt 
-import matplotlib.backends.backend_pdf as bpdf -from matplotlib.ticker import NullFormatter - -nullfmt = NullFormatter() # no labels - -parser = argparse.ArgumentParser( - description="Plot plasma and neutral profiles \ - in SOL for Kallenbach 1D divertor model" -) - -parser.add_argument("-f", metavar="f", type=str, help="File to read") - -args = parser.parse_args() - -# If user has specified a filename, -if args.f: - filename = args.f -else: - filename = "output_divertor.txt" - -f = open(filename, "r") -lines = f.readlines() -n = len(lines[8:]) - -# get headings -headings = list(item.strip("\n") for item in lines[7].split(" ") if item != "") -m = len(headings) -# print(headings) - -per_row = [] -for line in lines[8:]: - per_row.append([it.strip("\n") for it in line.split(" ") if it != ""]) - -per_column = list(zip(*per_row)) - -# Plotting -# 0 - steps -# 1 - x//b -# 2 - te -# 3 - ne -# 4 - Ptherm -# 5 - Ptotal -# 6 - v -# 7 - mach -# 8 - n0 -# 9 - power -# 10 - perparea -# 11 - qtot -# 12 - qconv -# 13 - qcond -# 14 - CX -# 15 - Ion -# 16 - Hrad -# 17 - imrad -# 18 - y7 -# 19 - y8 -# 20 - y9 -# 21 - y10 -# 22 - He -# 23 - Be -# 24 - C -# 25 - N -# 26 - O -# 27 - Ne -# 28 - Si -# 29 - Ar -# 30 - Fe -# 31 - Ni -# 32 - Kr -# 33 - Xe -# 34 - W -# 35 - n01/1e20 -# 36 - n02/1e20 -# 37 - nv24 -# 38 - v - -# First page -page1 = plt.figure(figsize=(10, 9)) -page1.subplots_adjust(hspace=0.05, wspace=0.0) - -# Second page plots -page2 = plt.figure(figsize=(10, 9)) -page2.subplots_adjust(hspace=0.05, wspace=0.0) - -# find max x||b -x_max = float(per_column[1][-1]) - -# Create numpy array of connection length -xpar = np.array([float(x) for x in per_column[1]]) -xpar[0] = 99999 - - -# convert to MW -cx_mw = [float(x) / 1e6 for x in per_column[14]] -ion_mw = [float(x) / 1e6 for x in per_column[15]] -hrad_mw = [float(x) / 1e6 for x in per_column[16]] -im_mw = [float(x) / 1e6 for x in per_column[17]] -# Power loss integrals are already in MW -y7_mw = np.array([float(x) for x in 
per_column[18]]) -y8_mw = np.array([float(x) for x in per_column[19]]) -y9_mw = np.array([float(x) for x in per_column[20]]) -y10_mw = np.array([float(x) for x in per_column[21]]) - -He_mw = [float(x) / 1e6 for x in per_column[22]] -Be_mw = [float(x) / 1e6 for x in per_column[23]] -C_mw = [float(x) / 1e6 for x in per_column[24]] -N_mw = [float(x) / 1e6 for x in per_column[25]] -O_mw = [float(x) / 1e6 for x in per_column[26]] -Ne_mw = [float(x) / 1e6 for x in per_column[27]] -Si_mw = [float(x) / 1e6 for x in per_column[28]] -Ar_mw = [float(x) / 1e6 for x in per_column[29]] -Fe_mw = [float(x) / 1e6 for x in per_column[30]] -Ni_mw = [float(x) / 1e6 for x in per_column[31]] -Kr_mw = [float(x) / 1e6 for x in per_column[32]] -Xe_mw = [float(x) / 1e6 for x in per_column[33]] -W_mw = [float(x) / 1e6 for x in per_column[34]] -n01 = [float(x) for x in per_column[35]] # /1e20 m-3 -n02 = [float(x) for x in per_column[36]] # /1e20 m-3 -# Particle flux and velocity are both negative in the code -nv = [-float(x) for x in per_column[37]] # /1e24 sm-2 -v = [-float(x) for x in per_column[38]] # ms-1 - -# Total power emitted by the SOL does not include the ionisation loss -power_loss_integral = y7_mw + y8_mw + y9_mw -spherical_power_load = power_loss_integral / (4 * 3.142 * (xpar * 0.5) ** 2) - -# Left hand plots (odd numbers) -# Row 1 -p1r1left = page1.add_subplot(421) -p1r1left.semilogy(per_column[1], hrad_mw, label="H rad", ls="dashed") -p1r1left.semilogy(per_column[1], cx_mw, label="CX", ls="dotted") -p1r1left.semilogy(per_column[1], im_mw, label="Imp rad", ls="dashdot") -p1r1left.semilogy(per_column[1], ion_mw, label="Ionisation", ls="solid") -p1r1left.set_xlim([0.0, 0.015]) -# p1r1left.set_ylim(ymin=1) -p1r1left.xaxis.set_major_formatter(nullfmt) -p1r1left.set_ylabel("power dens. 
(MWm$^{-3}$)") - - -# Row 2, Left -p1r2left = page1.add_subplot(423) -p1r2left.plot( - per_column[1], per_column[5], label="Plasma pressure including kinetic energy term" -) -p1r2left.plot(per_column[1], per_column[4], label="Plasma thermal pressure") -p1r2left.set_xlim([0.0, 0.015]) -p1r2left.set_ylim(ymin=0) -# p1r2left.set_ylim(ymax=2700) -p1r2left.set_ylabel("pressure (Pa)") -# p1r2left.legend(loc=4, prop={'size': 12}) -p1r2left.tick_params(axis="y", labelsize="9") -p1r2left.xaxis.set_major_formatter(nullfmt) - -# Row 3, Left -p1r3left = page1.add_subplot(425) -p1r3left.semilogy(per_column[1], per_column[2], label="$T_e$") -p1r3left.set_xlim([0.0, 0.015]) -p1r3left.set_ylim(ymin=1.0) -p1r3left.xaxis.set_major_formatter(nullfmt) -p1r3left.set_ylabel("(eV)") - - -# Row 4, Left -p1r4left = page1.add_subplot(427) -p1r4left.semilogy(per_column[1], per_column[3], label="$n_e/10^{20}m^{-3}$") -p1r4left.semilogy(per_column[1], per_column[8], label="$n_0/10^{20}m^{-3}$") -p1r4left.semilogy(per_column[1], n01, label="$n_01/10^{20}m^{-3}$", ls="dashed") -p1r4left.semilogy(per_column[1], n02, label="$n_02/10^{20}m^{-3}$", ls="dashed") -p1r4left.semilogy(per_column[1], per_column[7], label="mach") -# p1r4left.set_ylim([0.1, 100]) -p1r4left.set_xlim([0.0, 0.015]) -p1r4left.set_ylim(ymin=0.01) -p1r4left.set_xlabel(r"$x\parallel B$ (m)", fontsize=14) - - -# Row 1, Right -p1r1right = page1.add_subplot(422) -p1r1right.loglog(per_column[1], hrad_mw, label="H rad", ls="dashed") -p1r1right.loglog(per_column[1], cx_mw, label="CX", ls="dotted") -p1r1right.loglog(per_column[1], im_mw, label="Imp rad", ls="dashdot") -p1r1right.loglog(per_column[1], ion_mw, label="Ionisation", ls="solid") -p1r1right.set_xlim([0.015, x_max]) -# p1r1right.set_ylim(ymin=1) -p1r1right.legend(loc=1, prop={"size": 10}) -# p1r1right.plot((x_max, x_max), (0.001, 10000), ls='dashed', color="black") -# no labels -p1r1right.xaxis.set_major_formatter(nullfmt) -p1r1right.yaxis.set_major_formatter(nullfmt) 
-p1r1right.set_ylim(p1r1left.get_ylim()) - -# Row 2, Right -p1r2right = page1.add_subplot(424) -p1r2right.semilogx( - per_column[1], per_column[5], label="Plasma pressure including kinetic energy term" -) -p1r2right.semilogx(per_column[1], per_column[4], label="Plasma thermal pressure") -p1r2right.set_xlim([0.015, x_max]) -p1r2right.set_ylim(ymin=0) -p1r2right.legend(loc=4, prop={"size": 10}) -p1r2right.tick_params(axis="y", labelsize="9") -# no labels -p1r2right.xaxis.set_major_formatter(nullfmt) -p1r2right.yaxis.set_major_formatter(nullfmt) -p1r2right.set_ylim(p1r2left.get_ylim()) - -# Row 3, Right -p1r3right = page1.add_subplot(426) -p1r3right.loglog(per_column[1], per_column[2], label="$T_e$") -p1r3right.set_xlim([0.015, x_max]) -# p1r3right.set_ylim([0.0, 350.0]) -p1r3right.set_ylim(ymin=1.0) -p1r3right.legend(loc=4, prop={"size": 10}) -# no labels -p1r3right.xaxis.set_major_formatter(nullfmt) -p1r3right.yaxis.set_major_formatter(nullfmt) -p1r3right.set_ylim(p1r3left.get_ylim()) - -# Row 4, Right -p1r4right = page1.add_subplot(428) -p1r4right.loglog(per_column[1], per_column[3], label="$n_e/10^{20}m^{-3}$") -p1r4right.loglog(per_column[1], per_column[8], label="$n_0/10^{20}m^{-3}$") -p1r4right.loglog(per_column[1], n01, label="$n_{01}/10^{20}m^{-3}$", ls="dashed") -p1r4right.loglog(per_column[1], n02, label="$n_{02}/10^{20}m^{-3}$", ls="dashed") -p1r4right.loglog(per_column[1], per_column[7], label="mach") -# p1r4right.set_ylim([0.1, 10]) -p1r4right.set_xlim([0.015, x_max]) -# p1r4right.set_ylim(ymin=0.1) -p1r4right.set_xlabel(r"$x\parallel B$ (m)", fontsize=14) -p1r4right.legend(loc=1, prop={"size": 10}) -# no labels -p1r4right.yaxis.set_major_formatter(nullfmt) -p1r4right.set_ylim(p1r4left.get_ylim()) - -# Second page plots -# Row 1 -# Left -p2r1left = page2.add_subplot(421) -p2r1left.plot( - per_column[1], power_loss_integral, label="Integrated power emission from SOL" -) -p2r1left.set_xlim([0.0, 0.015]) -ymax = power_loss_integral[-1] -ymax = round(ymax / 
50 + 0.5) * 50 -# p2r1left.set_ylim([0, ymax]) -p2r1left.set_ylabel("(MW)") -p2r1left.xaxis.set_major_formatter(nullfmt) - -# Right -p2r1right = page2.add_subplot(422) -p2r1right.semilogx( - per_column[1], - power_loss_integral, - label="Integrated power emission from SOL\n=radiation + charge exchange", -) - -p2r1right.set_xlim([0.015, x_max]) -# p2r1right.set_ylim([0, ymax]) -# p2r1right.set_xlabel("$x\parallel B$ (m)", fontsize=14) -p2r1right.legend(loc=4, prop={"size": 10}) -# no labels -p2r1right.xaxis.set_major_formatter(nullfmt) -p2r1right.yaxis.set_major_formatter(nullfmt) -p2r1right.set_ylim(p2r1left.get_ylim()) - -# Row 2 -# Left -p2r2left = page2.add_subplot(423) -p2r2left.set_xlim([0.0, 0.015]) -p2r2left.set_ylim([0.1, 1000]) -# p2r2left.set_xlabel("$x\parallel B$ (m)", fontsize=14) -p2r2left.set_ylabel("power dens. (MWm$^{-3}$)") -if max(He_mw) > 0.001: - p2r2left.semilogy(per_column[1], He_mw, label="He", ls="solid") -if max(Be_mw) > 0.001: - p2r2left.semilogy(per_column[1], Be_mw, label="Be", ls="dashed") -if max(C_mw) > 0.001: - p2r2left.semilogy(per_column[1], C_mw, label="C", ls="dashdot") -if max(N_mw) > 0.001: - p2r2left.semilogy(per_column[1], N_mw, label="N", ls="dotted") -if max(O_mw) > 0.001: - p2r2left.semilogy(per_column[1], O_mw, label="O", ls="solid") -if max(Ne_mw) > 0.001: - p2r2left.semilogy(per_column[1], Ne_mw, label="Ne", ls="dashed") -if max(Si_mw) > 0.001: - p2r2left.semilogy(per_column[1], Si_mw, label="Si", ls="dashdot") -if max(Ar_mw) > 0.001: - p2r2left.semilogy(per_column[1], Ar_mw, label="Ar", ls="dotted") -if max(Fe_mw) > 0.001: - p2r2left.semilogy(per_column[1], Fe_mw, label="Fe", ls="solid") -if max(Ni_mw) > 0.001: - p2r2left.semilogy(per_column[1], Ni_mw, label="Ni", ls="dashed") -if max(Kr_mw) > 0.001: - p2r2left.semilogy(per_column[1], Kr_mw, label="Kr", ls="solid") -if max(Xe_mw) > 0.001: - p2r2left.semilogy(per_column[1], Xe_mw, label="Xe", ls="dashed") -if max(W_mw) > 0.001: - p2r2left.semilogy(per_column[1], W_mw, 
label="W", ls="dashdot") -# p2r2left.legend(loc=1, prop={'size': 8}) -p2r2left.plot((x_max, x_max), (1, 10000), ls="dashed", color="black") -p2r2left.xaxis.set_major_formatter(nullfmt) - - -# Row 2 Right -p2r2right = page2.add_subplot(424) -p2r2right.set_xlim([0.015, x_max]) -p2r2right.set_ylim([0.1, 1000]) -# p2r2right.set_xlabel("$x\parallel B$ (m)", fontsize=14) -# p2r2right.set_ylabel("power dens. (MWm$^{-3}$)") -if max(He_mw) > 0.001: - p2r2right.loglog(per_column[1], He_mw, label="He", ls="solid") -if max(Be_mw) > 0.001: - p2r2right.loglog(per_column[1], Be_mw, label="Be", ls="dashed") -if max(C_mw) > 0.001: - p2r2right.loglog(per_column[1], C_mw, label="C", ls="dashdot") -if max(N_mw) > 0.001: - p2r2right.loglog(per_column[1], N_mw, label="N", ls="dotted") -if max(O_mw) > 0.001: - p2r2right.loglog(per_column[1], O_mw, label="O", ls="solid") -if max(Ne_mw) > 0.001: - p2r2right.loglog(per_column[1], Ne_mw, label="Ne", ls="dashed") -if max(Si_mw) > 0.001: - p2r2right.loglog(per_column[1], Si_mw, label="Si", ls="dashdot") -if max(Ar_mw) > 0.001: - p2r2right.loglog(per_column[1], Ar_mw, label="Ar", ls="dotted") -if max(Fe_mw) > 0.001: - p2r2right.loglog(per_column[1], Fe_mw, label="Fe", ls="solid") -if max(Ni_mw) > 0.001: - p2r2right.loglog(per_column[1], Ni_mw, label="Ni", ls="dashed") -if max(Kr_mw) > 0.001: - p2r2right.loglog(per_column[1], Kr_mw, label="Kr", ls="solid") -if max(Xe_mw) > 0.001: - p2r2right.loglog(per_column[1], Xe_mw, label="Xe", ls="dashed") -if max(W_mw) > 0.001: - p2r2right.loglog(per_column[1], W_mw, label="W", ls="dashdot") -p2r2right.legend(loc=1, prop={"size": 10}) -# no labels -p2r2right.xaxis.set_major_formatter(nullfmt) -p2r2right.yaxis.set_major_formatter(nullfmt) -p2r2right.set_ylim(p2r2left.get_ylim()) - -# Row 3 Left -p2r3left = page2.add_subplot(425) -p2r3left.plot(per_column[1], nv, label="Plasma flux [$10^{24}m^{-2}s^{-1}$]") -p2r3left.set_xlim([0.0, 0.015]) -p2r3left.set_ylim(ymax=9.9) -ylim = p2r3left.get_ylim() - 
-p2r3left.xaxis.set_major_formatter(nullfmt) -# ymax = v[-1] -# ymax = round(ymax/50 + 0.5) * 50 -# p2r3left.set_ylim([0, ymax]) -# p2r3left.legend(loc=1, prop={'size': 8}) - -# Row 3 Right -p2r3right = page2.add_subplot(426) -p2r3right.semilogx(per_column[1], nv, label="Plasma flux [$10^{24}m^{-2}s^{-1}$]") -p2r3right.set_xlim([0.015, x_max]) -p2r3right.set_ylim([0, ylim[1]]) -p2r3right.legend(loc=1, prop={"size": 10}) -# no labels -p2r3right.xaxis.set_major_formatter(nullfmt) -p2r3right.yaxis.set_major_formatter(nullfmt) -p2r3right.set_ylim(p2r3left.get_ylim()) - -# Row 4 Left -p2r4left = page2.add_subplot(427) -p2r4left.plot(per_column[1], v, label="Plasma speed [ms$^{-1}$]") -p2r4left.set_xlim([0.0, 0.015]) -ylim = p2r4left.get_ylim() -p2r4left.tick_params(axis="y", labelsize="9") -p2r4left.set_xlabel(r"$x\parallel B$ (m)", fontsize=14) - -# Row 4 Right -p2r4right = page2.add_subplot(428) -p2r4right.semilogx(per_column[1], v, label="Plasma speed [ms$^{-1}$]") -p2r4right.set_xlim([0.015, x_max]) -p2r4right.set_xlabel(r"$x\parallel B$ (m)", fontsize=14) -p2r4right.set_ylim([0, ylim[1]]) -p2r4right.legend(loc=1, prop={"size": 10}) -p2r4right.tick_params(axis="y", labelsize="9") -# no labels -p2r4right.yaxis.set_major_formatter(nullfmt) -p2r4right.set_ylim(p2r4left.get_ylim()) - -# This should be called after all axes have been added -# Unfortunately it seems to override the hspace and wspace parameters. 
-# page1.tight_layout() -# page2.tight_layout() - -# Save as a single two-page file -with bpdf.PdfPages("1D profiles " + filename + ".pdf") as pdf: - pdf.savefig(page1) - pdf.savefig(page2) - -with bpdf.PdfPages("1D profiles " + filename + "p1.pdf") as pdf: - pdf.savefig(page1) -with bpdf.PdfPages("1D profiles " + filename + "p2.pdf") as pdf: - pdf.savefig(page2) - -plt.show(page1) -plt.show(page2) diff --git a/utilities/kallenbach_plotting2.py b/utilities/kallenbach_plotting2.py deleted file mode 100644 index 52baaf26..00000000 --- a/utilities/kallenbach_plotting2.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python - -import sys -import argparse -import numpy as np -import matplotlib - -if sys.platform == "darwin": - matplotlib.use("WxAgg") -import matplotlib.pyplot as plt -import matplotlib.backends.backend_pdf as bpdf -from matplotlib.ticker import NullFormatter - -nullfmt = NullFormatter() # no labels - -parser = argparse.ArgumentParser( - description="Plot plasma and neutral profiles \ - in SOL for Kallenbach 1D divertor model" -) - -parser.add_argument("-f", metavar="f", type=str, help="File to read") - -args = parser.parse_args() - -# If user has specified a filename, -if args.f: - filename = args.f -else: - filename = "output_divertor.txt" - -f = open(filename, "r") -lines = f.readlines() -n = len(lines[8:]) - -# get headings -headings = list(item.strip("\n") for item in lines[7].split(" ") if item != "") -m = len(headings) - -per_row = [] -for line in lines[8:]: - per_row.append([it.strip("\n") for it in line.split(" ") if it != ""]) - -per_column = list(zip(*per_row)) - -# Plotting -# 0 - steps -# 1 - x//b -# 2 - te -# 3 - ne -# 4 - Ptherm -# 5 - Ptotal -# 6 - v -# 7 - mach -# 8 - n0 -# 9 - power -# 10 - perparea -# 11 - qtot -# 12 - qconv -# 13 - qcond -# 14 - CX -# 15 - Ion -# 16 - Hrad -# 17 - imrad -# 18 - y7 -# 19 - y8 -# 20 - y9 -# 21 - y10 -# 22 - He -# 23 - Be -# 24 - C -# 25 - N -# 26 - O -# 27 - Ne -# 28 - Si -# 29 - Ar -# 30 - Fe -# 31 - 
Ni -# 32 - Kr -# 33 - Xe -# 34 - W -# 35 - n01/1e20 -# 36 - n02/1e20 -# 37 - nv24 -# 38 - v - -# First page -page1 = plt.figure(figsize=(6, 9)) -page1.subplots_adjust(hspace=0.20, wspace=0.0) - -# Second page plots -page2 = plt.figure(figsize=(6, 9)) -page2.subplots_adjust(hspace=0.20, wspace=0.0) - -# find max x||b -x_max = float(per_column[1][-1]) - -# Create numpy array of connection length -xpar = np.array([float(x) for x in per_column[1]]) -# xpar[0] = 99999 - -# steps -steps = [int(x) for x in per_column[0]] - -density = [float(x) for x in per_column[3]] -target_temperature = [float(x) for x in per_column[2]] -plasma_thermal_pressure = [float(x) for x in per_column[4]] -plasma_pressure = [float(x) for x in per_column[5]] -mach = [float(x) for x in per_column[7]] -total_neutral_density = [float(x) for x in per_column[8]] - -# convert to MW -cx_mw = [float(x) / 1e6 for x in per_column[14]] -ion_mw = [float(x) / 1e6 for x in per_column[15]] -hrad_mw = [float(x) / 1e6 for x in per_column[16]] -im_mw = [float(x) / 1e6 for x in per_column[17]] -# Power loss integrals are already in MW -y7_mw = np.array([float(x) for x in per_column[18]]) -y8_mw = np.array([float(x) for x in per_column[19]]) -y9_mw = np.array([float(x) for x in per_column[20]]) -y10_mw = np.array([float(x) for x in per_column[21]]) - -He_mw = [float(x) / 1e6 for x in per_column[22]] -Be_mw = [float(x) / 1e6 for x in per_column[23]] -C_mw = [float(x) / 1e6 for x in per_column[24]] -N_mw = [float(x) / 1e6 for x in per_column[25]] -O_mw = [float(x) / 1e6 for x in per_column[26]] -Ne_mw = [float(x) / 1e6 for x in per_column[27]] -Si_mw = [float(x) / 1e6 for x in per_column[28]] -Ar_mw = [float(x) / 1e6 for x in per_column[29]] -Fe_mw = [float(x) / 1e6 for x in per_column[30]] -Ni_mw = [float(x) / 1e6 for x in per_column[31]] -Kr_mw = [float(x) / 1e6 for x in per_column[32]] -Xe_mw = [float(x) / 1e6 for x in per_column[33]] -W_mw = [float(x) / 1e6 for x in per_column[34]] -n01 = [float(x) for x in 
per_column[35]] # /1e20 m-3 -n02 = [float(x) for x in per_column[36]] # /1e20 m-3 -# Particle flux and velocity are both negative in the code -nv = [-float(x) for x in per_column[37]] # /1e24 sm-2 -v = [-float(x) for x in per_column[38]] # ms-1 - -# Total power emitted by the SOL does not include the ionisation loss -power_loss_integral = y7_mw + y8_mw + y9_mw -spherical_power_load = power_loss_integral / (4 * 3.142 * (xpar * 0.5) ** 2) - -# Row 1 -p1r1 = page1.add_subplot(411) -p1r1.loglog(xpar, hrad_mw, label="H rad", ls="dashed") -p1r1.loglog(xpar, cx_mw, label="CX", ls="dotted") -p1r1.loglog(xpar, im_mw, label="Imp rad", ls="dashdot") -p1r1.loglog(xpar, ion_mw, label="Ionisation", ls="solid") -p1r1.set_xlim(xmin=0.0002) -p1r1.set_xlim(xmax=x_max) -p1r1.set_ylim(ymin=1.0) -# p1r1.xaxis.set_major_formatter(nullfmt) -p1r1.set_ylabel("power dens. (MWm$^{-3}$)") -p1r1.legend(loc=1, prop={"size": 10}) - -# # Row 2, -p1r2 = page1.add_subplot(412) -p1r2.semilogx( - xpar, plasma_pressure, label="Plasma pressure including kinetic energy term" -) -p1r2.semilogx(xpar, plasma_thermal_pressure, label="Plasma thermal pressure") -p1r2.set_xlim(xmin=0.0002) -p1r2.set_xlim(xmax=x_max) -p1r2.set_ylim(ymin=0) -p1r2.set_ylabel("pressure (Pa)") -p1r2.legend(loc=4, prop={"size": 10}) -p1r2.tick_params(axis="y", labelsize="9") -# p1r2.xaxis.set_major_formatter(nullfmt) - -# Row 3, -p1r3 = page1.add_subplot(413) -p1r3.loglog(xpar, target_temperature, label="Temperature $T_e$") -p1r3.set_xlim(xmin=0.0002) -p1r3.set_xlim(xmax=x_max) -p1r3.set_ylim(ymin=1.0) -# p1r3.xaxis.set_major_formatter(nullfmt) -p1r3.set_ylabel("(eV)") -p1r3.legend(loc=4, prop={"size": 10}) - -# Row 4, -p1r4 = page1.add_subplot(414) -p1r4.loglog(xpar, density, label="$n_e/10^{20}m^{-3}$") -p1r4.loglog(xpar, total_neutral_density, label="$n_0/10^{20}m^{-3}$") -p1r4.loglog(xpar, n01, label="$n_{01}/10^{20}m^{-3}$", ls="dashed") -p1r4.loglog(xpar, n02, label="$n_{02}/10^{20}m^{-3}$", ls="dashed") -p1r4.loglog(xpar, 
mach, label="Mach") -p1r4.set_xlim(xmin=0.0002) -p1r4.set_xlim(xmax=x_max) -p1r4.set_ylim(ymin=0.01) -p1r4.tick_params(axis="x", labelsize="11") -p1r4.set_xlabel(r"Connection length from target $x_{\parallel}$ (m)", fontsize=10) -p1r4.legend(loc=4, prop={"size": 10}) - -# Second page plots------------------------------------ -# Row 1 -# Left -p2r1 = page2.add_subplot(411) -p2r1.semilogx( - xpar, - power_loss_integral, - label="Integrated power emission from" " SOL\n=radiation + charge exchange", -) -p2r1.set_xlim(xmin=0.0002) -p2r1.set_xlim(xmax=x_max) -ymax = power_loss_integral[-1] -ymax = round(ymax / 50 + 0.5) * 50 -# p2r1.set_ylim([0, ymax]) -p2r1.set_ylabel("(MW)") -# p2r1.xaxis.set_major_formatter(nullfmt) -p2r1.legend(loc=2, prop={"size": 10}) - -# Row 2# -p2r2 = page2.add_subplot(412) -p2r2.set_xlim(xmin=0.0002) -p2r2.set_xlim(xmax=x_max) -p2r2.set_ylim([0.1, 1000]) -p2r2.set_ylabel("power dens. (MWm$^{-3}$)") -if max(He_mw) > 0.001: - p2r2.loglog(xpar, He_mw, label="He", ls="solid") -if max(Be_mw) > 0.001: - p2r2.loglog(xpar, Be_mw, label="Be", ls="dashed") -if max(C_mw) > 0.001: - p2r2.loglog(xpar, C_mw, label="C", ls="dashdot") -if max(N_mw) > 0.001: - p2r2.loglog(xpar, N_mw, label="N", ls="dotted") -if max(O_mw) > 0.001: - p2r2.loglog(xpar, O_mw, label="O", ls="solid") -if max(Ne_mw) > 0.001: - p2r2.loglog(xpar, Ne_mw, label="Ne", ls="dashed") -if max(Si_mw) > 0.001: - p2r2.loglog(xpar, Si_mw, label="Si", ls="dashdot") -if max(Ar_mw) > 0.001: - p2r2.loglog(xpar, Ar_mw, label="Ar", ls="dotted") -if max(Fe_mw) > 0.001: - p2r2.loglog(xpar, Fe_mw, label="Fe", ls="solid") -if max(Ni_mw) > 0.001: - p2r2.loglog(xpar, Ni_mw, label="Ni", ls="dashed") -if max(Kr_mw) > 0.001: - p2r2.loglog(xpar, Kr_mw, label="Kr", ls="solid") -if max(Xe_mw) > 0.001: - p2r2.loglog(xpar, Xe_mw, label="Xe", ls="dashed") -if max(W_mw) > 0.001: - p2r2.loglog(xpar, W_mw, label="W", ls="dashdot") -p2r2.legend(loc=1, prop={"size": 10}) -p2r2.plot((x_max, x_max), (1, 10000), ls="dashed", 
color="black") -# p2r2.xaxis.set_major_formatter(nullfmt) - -# Row 3 -p2r3 = page2.add_subplot(413) -p2r3.semilogx(xpar, nv, label="Plasma flux [$10^{24}m^{-2}s^{-1}$]") -p2r3.set_xlim(xmin=0.0002) -p2r3.set_xlim(xmax=x_max) -# p2r3.xaxis.set_major_formatter(nullfmt) -p2r3.legend(loc=1, prop={"size": 10}) - -# Row 4 -p2r4 = page2.add_subplot(414) -p2r4.semilogx(xpar, v, label="Plasma speed [ms$^{-1}$]") -p2r4.set_xlim(xmin=0.0002) -p2r4.set_xlim(xmax=x_max) -p2r4.tick_params(axis="y", labelsize="9") -p2r4.tick_params(axis="x", labelsize="11") -p2r4.set_xlabel(r"Connection length from target $x_{\parallel}$ (m)", fontsize=10) -p2r4.legend(loc=1, prop={"size": 10}) - -# Save as a single two-page file -with bpdf.PdfPages("1D profiles " + filename + ".pdf") as pdf: - pdf.savefig(page1) - pdf.savefig(page2) - -page1.savefig("1D profiles " + filename + "p1.png") -page2.savefig("1D profiles " + filename + "p2.png") - -page1.savefig("1D profiles " + filename + "p1.eps") -page2.savefig("1D profiles " + filename + "p2.eps") -# with bpdf.PdfPages("1D profiles " + filename + "p1.pdf") as pdf: -# pdf.savefig(page1) -# with bpdf.PdfPages("1D profiles " + filename + "p2.pdf") as pdf: -# pdf.savefig(page2) - -plt.show(page1) -plt.show(page2) diff --git a/utilities/line_length_standard.py b/utilities/line_length_standard.py deleted file mode 100644 index eea4a51a..00000000 --- a/utilities/line_length_standard.py +++ /dev/null @@ -1,112 +0,0 @@ -""" - - Script to check line length of repository files - - Files include: - - *.f90 - - *.py - - *.tex - - James Morris - UKAEA - 26.09.19 - -""" - -import os - -# Get repository root directory -import pathlib -import time - -timeout = time.time() + 10 # 10 seconds -found_root = False -back = "" -while not found_root: - if time.time() > timeout: - print( - "Can't find repository root. 
Make sure utility is being run " - "inside a PROCESS repository clone" - ) - break - else: - my_file = pathlib.Path(back + ".gitignore") - if my_file.is_file(): - found_root = True - if back == "": - REPO_ROOT = "." - else: - REPO_ROOT = back - back += "../" - -# Set line limit -LINE_LIMIT = 100 - -# Paths and files to check -CHECK_LIST = { - "fortran": {"path": REPO_ROOT + "/source/fortran/", "extension": ".f90"}, - "python": {"path": REPO_ROOT + "/utilities/", "extension": ".py"}, - "latex": {"path": REPO_ROOT + "/documentation/", "extension": ".tex"}, -} - -if __name__ == "__main__": - - # Intro message - - intro = """ - - Checking line length standard (<100) lines. - - For the following locations - - - /source/fortran/*.f90 - - /source/utilities/*.py - - /source/documentation/*.tex - - J. Morris - UKAEA - 26.07.19 - - """ - - print(intro) - - # Set status - STATUS = True - LINE_COUNT = 0 - FILE_COUNT = 0 - - # Check each entry in CHECK_LIST - for key in CHECK_LIST.keys(): - print("") - print("# Checking {0} files".format(key)) - print("") - check_path = CHECK_LIST[key]["path"] - for filename in os.listdir(check_path): - if CHECK_LIST[key]["extension"] in filename: - FORT_STATUS = True - print("") - print(filename) - file_lines = open( - check_path + filename, "r", encoding="UTF-8" - ).readlines() - counter = 0 - for line in file_lines: - counter += 1 - if len(line) > LINE_LIMIT: - print( - "|-- {0:30} :: line :: {1:<5} :: length = {2:<10}".format( - filename, counter, len(line) - ) - ) - STATUS = False - LINE_COUNT += 1 - if FORT_STATUS: - FILE_COUNT += 1 - FORT_STATUS = False - - # if STATUS: - # sys.exit(0) - # else: - # sys.exit("\nERROR: Line length exceeded {0} times in {1} files". - # format(LINE_COUNT, FILE_COUNT)) diff --git a/utilities/make_plot_dat.conf b/utilities/make_plot_dat.conf deleted file mode 100644 index 7e76c7b8..00000000 --- a/utilities/make_plot_dat.conf +++ /dev/null @@ -1,12 +0,0 @@ -# make_plot_dat.out config file. 
-rmajor -aspect -rminor -bt -powfmw -pnetelmw -te -pdivt -s_tresca_cond -s_tresca_case -plascur/1D6 \ No newline at end of file diff --git a/utilities/make_plot_dat.py b/utilities/make_plot_dat.py deleted file mode 100755 index 834d44ba..00000000 --- a/utilities/make_plot_dat.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env python - -""" - - PROCESS make make_plot_dat.out out of MFILE.DAT - - James Morris - CCFE - - Notes: - + 20/02/2014: Initial version created - + 12/03/2014: Updated to put into PyCharm - - Compatible with PROCESS version 274 - -""" - -import os -import argparse -import process.io.mfile as mf -from process.io.mfile import make_plot_dat - -# from process_io_lib.process_dicts import PARAMETER_DEFAULTS -from dict_tools import proc_dict - - -if __name__ == "__main__": - - # Setup command line arguments - parser = argparse.ArgumentParser( - description="Process MFILE.DAT into " "make_plot_dat.out." - ) - - parser.add_argument( - "-p", - metavar="p", - type=str, - nargs="+", - help="new variables for the output plot.dat", - ) - - parser.add_argument("-f", metavar="f", type=str, help="File to read as MFILE.DAT") - - parser.add_argument( - "--defaults", help="run with default params", action="store_true" - ) - - parser.add_argument( - "--reset-config", help="Reset make_plot_dat.conf", action="store_true" - ) - - parser.add_argument( - "--columns", help="Write make_plot_dat.out in columns", action="store_true" - ) - - parser.add_argument("--all", help="Output entire MFILE", action="store_true") - - args = parser.parse_args() - - # If user has specified a file that isn't MFILE.DAT pass the filename to - # MFILE() class. - if args.f: - M = mf.MFile(filename=args.f) - else: - M.data["rmajor"].get_scan(-1) - - # Get files in current directory to check for the config file. - current_directory = os.listdir(".") - if "make_plot_dat.conf" not in current_directory or args.reset_config: - mf.write_mplot_conf() - - # Read the config file. 
- INPUT_CONFIG = mf.read_mplot_conf() - - # If the user added new parameters in the command line add them to the - # INPUT_CONFIG list to pass to make_plot_dat() - if args.p: - conf_file = open("make_plot_dat.conf", "a") - for item in args.p: - if item not in INPUT_CONFIG: - conf_file.write(item + "\n") - conf_file.close() - INPUT_CONFIG = mf.read_mplot_conf() - - # If the user has requested the default parameters get default list from - # process_io_lib - if args.all: - make_plot_dat(M, M.data.keys()) - else: - if args.defaults: - - # If user has specified column format - if args.columns: - make_plot_dat( - M, - proc_dict.get_dicts()[proc_dict.PARAMETER_DEFAULTS], - file_format="column", - ) - - # If user has specified row format - else: - make_plot_dat(M, proc_dict.get_dicts()[proc_dict.PARAMETER_DEFAULTS]) - else: - - # If user has specified column format - if args.columns: - make_plot_dat(M, INPUT_CONFIG, file_format="column") - - # If user has specified row format - else: - make_plot_dat(M, INPUT_CONFIG) diff --git a/utilities/mcnp_output.py b/utilities/mcnp_output.py deleted file mode 100755 index 1e6dd41a..00000000 --- a/utilities/mcnp_output.py +++ /dev/null @@ -1,486 +0,0 @@ -#! /usr/bin/env python - -""" - - Program to output an MCNP compatible file from MFILE.DAT - - James Morris - - 13/06.2014 - - Notes: - + 13/06/2014 Original version created. 
- + 01/07/2014 Added option for CTF - + 09/09/2014 Added extra TF coil information -""" - -import argparse -from process.io.mfile import MFile - - -RADIAL_BUILD = [ - "bore", - "ohcth", - "gapoh", - "tfcth", - "gapds", - "d_vv_in", - "shldith", - "blnkith", - "fwith", - "scrapli", - "rminor", - "scraplo", - "fwoth", - "blnkoth", - "shldoth", - "gapsto", - "tfthko", -] - - -class ProcessEllipse(object): - """A class to store ellipse data""" - - def __init__(self, x, y, z): - self.x = x - self.y = y - self.z = z - return - - -class ProcessCylinder(object): - """A class to store ellipse data""" - - def __init__(self, dx, dy): - self.dx = dx - self.dy = dy - return - - -class ProcessPlane(object): - """A class to store ellipse data""" - - def __init__(self, dx, dy): - self.dx = dx - self.dy = dy - return - - -class ProcessTFArcs(object): - """A class to store TF arc data""" - - def __init__(self, c_x, c_y, x_1, y_1, x_2, y_2): - self.c_x = c_x - self.c_y = c_y - self.x_1 = x_1 - self.y_1 = y_1 - self.x_2 = x_2 - self.y_2 = y_2 - return - - -class InfoHolder(object): - """A Class to store information""" - - def __init__(self, name, value): - self.name = name - self.value = value - return - - -def main(cl_args): - """Main function for converting MFILE data into MCNP format - - Arguments: - cl_args --> command line arguments - - """ - - mfile_data = MFile(cl_args.f) - shapes = dict() - - if cl_args.ctf: - populate_ctf_ellipse_data(shapes, mfile_data) - else: - populate_tok_ellipse_data(shapes, mfile_data) - - write_shapes_to_file(shapes, cl_args.o) - - -def get_xyz(mfile_obj): - """Function to get x,y,z data for all ellipses""" - x = mfile_obj.data["rmajor"].get_scan(-1) - mfile_obj.data["rminor"].get_scan( - -1 - ) * mfile_obj.data["triang95"].get_scan(-1) - y = 0.0 - z = 0.0 - return x, y, z - - -def populate_ctf_ellipse_data(shape_objs, mf_data): - """Function to populate the ellipse objects with data for CTF case""" - - r_major = mf_data.data["rmajor"].get_scan(-1) - 
r_minor = mf_data.data["rminor"].get_scan(-1) - kappa_95 = mf_data.data["kappa95"].get_scan(-1) - delta_95 = mf_data.data["triang95"].get_scan(-1) - h_plasma = kappa_95 * r_minor - t_tf = mf_data.data["tfcth"].get_scan(-1) - t_scrape_o = mf_data.data["scraplo"].get_scan(-1) - t_fw_i = mf_data.data["fwith"].get_scan(-1) - t_fw_o = mf_data.data["fwoth"].get_scan(-1) - t_shield_i = mf_data.data["shldith"].get_scan(-1) - t_shield_o = mf_data.data["shldoth"].get_scan(-1) - t_blanket_o = mf_data.data["blnkoth"].get_scan(-1) - r_c = r_major - (r_minor * delta_95) - - # TF centre column - thickness_tf = t_tf - height_tf = h_plasma + t_scrape_o + t_fw_o + t_blanket_o + t_shield_o - shape_objs["c1"] = ProcessCylinder(thickness_tf, height_tf) - - # Inboard shield - thickness_shield = t_shield_i - height_shield = h_plasma + t_scrape_o + t_fw_o + t_blanket_o + t_shield_o - shape_objs["c2"] = ProcessCylinder(thickness_shield + thickness_tf, height_shield) - - # Inboard first wall - thickness_fw = t_fw_i - height_fw = h_plasma + t_scrape_o + t_fw_o + t_blanket_o + t_shield_o - shape_objs["c3"] = ProcessCylinder( - thickness_fw + thickness_tf + thickness_shield, height_fw - ) - - # Upper shield mid-section - horizontal_thickness_shield_u = r_c - (t_tf + t_shield_i + t_fw_i) - z_upper_shield_u = h_plasma + t_scrape_o + t_fw_o + t_blanket_o + t_shield_o - r_outer_shield_u = t_tf + t_shield_i + t_fw_i + horizontal_thickness_shield_u - shape_objs["p1"] = ProcessPlane(r_outer_shield_u, z_upper_shield_u) - - # Upper blanket mid-section - z_upper_blanket_u = h_plasma + t_scrape_o + t_fw_o + t_blanket_o - r_outer_blanket_u = t_tf + t_shield_i + t_fw_i + horizontal_thickness_shield_u - shape_objs["p2"] = ProcessPlane(r_outer_blanket_u, z_upper_blanket_u) - - # Upper first wall mid-section - z_upper_fw_u = h_plasma + t_scrape_o + t_fw_o - z_lower_fw_u = h_plasma + t_scrape_o - r_outer_fw_u = t_tf + t_shield_i + t_fw_i + horizontal_thickness_shield_u - shape_objs["p3"] = 
ProcessPlane(r_outer_fw_u, z_upper_fw_u) - - # Upper first wall mid-section lower edge - z_upper_fw_u = h_plasma + t_scrape_o + t_fw_o - z_lower_fw_u = h_plasma + t_scrape_o - r_outer_fw_u = t_tf + t_shield_i + t_fw_i + horizontal_thickness_shield_u - shape_objs["p4"] = ProcessPlane(r_outer_fw_u, z_lower_fw_u) - - # Lower first wall mid-section - z_upper_fw_l = -h_plasma - t_scrape_o - z_lower_fw_l = -h_plasma - t_scrape_o - t_fw_o - r_outer_fw_l = t_tf + t_shield_i + t_fw_i + horizontal_thickness_shield_u - shape_objs["p4"] = ProcessPlane(r_outer_fw_l, z_upper_fw_l) - - # Lower first wall mid-section - z_upper_fw_l = -h_plasma - t_scrape_o - z_lower_fw_l = -h_plasma - t_scrape_o - t_fw_o - r_outer_fw_l = t_tf + t_shield_i + t_fw_i + horizontal_thickness_shield_u - shape_objs["p5"] = ProcessPlane(r_outer_fw_l, z_lower_fw_l) - - # Lower blanket mid-section - z_lower_blanket_l = -h_plasma - t_scrape_o - t_fw_o - t_blanket_o - r_outer_blanket_l = t_tf + t_shield_i + t_fw_i + horizontal_thickness_shield_u - shape_objs["p6"] = ProcessPlane(r_outer_blanket_l, z_lower_blanket_l) - - # Lower shield mid-section - z_lower_shield_l = -h_plasma - t_scrape_o - t_fw_o - t_blanket_o - t_shield_o - r_outer_shield_l = t_tf + t_shield_i + t_fw_i + horizontal_thickness_shield_u - shape_objs["p7"] = ProcessPlane(r_outer_shield_l, z_lower_shield_l) - - # Ellipse info - x, y, z = get_xyz(mf_data) - - # Ellipse 1: outboard first wall inner edge - shape_objs["e1"] = ProcessEllipse(x, y, z) - shape_objs["e1"].a = (r_major - r_c) + r_minor + t_scrape_o - shape_objs["e1"].b = h_plasma + t_scrape_o - shape_objs["e1"].c = r_c - - # Ellipse 2: outboard blanket inner edge - shape_objs["e2"] = ProcessEllipse(x, y, z) - shape_objs["e2"].a = (r_major - r_c) + r_minor + t_scrape_o + t_fw_o - shape_objs["e2"].b = h_plasma + t_scrape_o + t_fw_o - shape_objs["e2"].c = r_c - - # Ellipse 3: outboard shield inner edge - shape_objs["e3"] = ProcessEllipse(x, y, z) - shape_objs["e3"].a = (r_major - r_c) + 
r_minor + t_scrape_o + t_fw_o + t_blanket_o - shape_objs["e3"].b = h_plasma + t_scrape_o + t_fw_o + t_blanket_o - shape_objs["e3"].c = r_c - - # Ellipse 4: outboard shield outer edge - shape_objs["e4"] = ProcessEllipse(x, y, z) - shape_objs["e4"].a = ( - (r_major - r_c) + r_minor + t_scrape_o + t_fw_o + t_blanket_o + t_shield_o - ) - shape_objs["e4"].b = h_plasma + t_scrape_o + t_fw_o + t_blanket_o + t_shield_o - shape_objs["e4"].c = r_c - - # Extra information - # TF coil inner leg thickness - name = "TF Coil inner leg thickness (m)" - value = t_tf - shape_objs["i1"] = InfoHolder(name, value) - - # TF coil inner leg thickness - name = "TF Coil outer leg thickness (m)" - value = mf_data.data["tfthko"].get_scan(-1) - shape_objs["i2"] = InfoHolder(name, value) - - # TF coil inner plasma facing case thickness - name = "TF Coil inner leg plasma facing case thickness (m)" - value = mf_data.data["casthi"].get_scan(-1) - shape_objs["i3"] = InfoHolder(name, value) - - # TF coil inner case thickness - name = "TF Coil inner case thickness (m)" - value = mf_data.data["thkcas"].get_scan(-1) - shape_objs["i4"] = InfoHolder(name, value) - - # TF coil winding pack thickness - name = "TF Coil winding pack thickness (m)" - value = mf_data.data["dr_tf_wp"].get_scan(-1) - shape_objs["i5"] = InfoHolder(name, value) - - return shape_objs - - -def populate_tok_ellipse_data(shape_objs, mf_data): - """Function to populate the ellipse objects with data for CTF case""" - - r_major = mf_data.data["rmajor"].get_scan(-1) - r_minor = mf_data.data["rminor"].get_scan(-1) - kappa_95 = mf_data.data["kappa95"].get_scan(-1) - delta_95 = mf_data.data["triang95"].get_scan(-1) - h_plasma = kappa_95 * r_minor - t_tf = mf_data.data["tfcth"].get_scan(-1) - t_scrape_i = mf_data.data["scrapli"].get_scan(-1) - t_scrape_o = mf_data.data["scraplo"].get_scan(-1) - t_fw_i = mf_data.data["fwith"].get_scan(-1) - t_fw_o = mf_data.data["fwoth"].get_scan(-1) - t_shield_i = mf_data.data["shldith"].get_scan(-1) - 
t_shield_o = mf_data.data["shldoth"].get_scan(-1) - t_blanket_i = mf_data.data["blnkith"].get_scan(-1) - t_blanket_o = mf_data.data["blnkoth"].get_scan(-1) - r_c = r_major - (r_minor * delta_95) - - # Ellipse info - x, y, z = get_xyz(mf_data) - - # Ellipse 1: inboard shield outer edge - shape_objs["e1"] = ProcessEllipse(x, y, z) - shape_objs["e1"].a = ( - r_minor - (r_major - r_c) + t_scrape_i + t_fw_i + t_blanket_i + t_shield_i - ) - shape_objs["e1"].b = h_plasma + t_scrape_o + t_fw_i + t_blanket_i + t_shield_i - shape_objs["e1"].c = r_c - - # Ellipse 2: inboard blanket outer edge - shape_objs["e2"] = ProcessEllipse(x, y, z) - shape_objs["e2"].a = r_minor - (r_major - r_c) + t_scrape_i + t_fw_i + t_blanket_i - shape_objs["e2"].b = h_plasma + t_scrape_i + t_fw_i + t_blanket_i - shape_objs["e2"].c = r_c - - # Ellipse 3: inboard first wall outer edge - shape_objs["e3"] = ProcessEllipse(x, y, z) - shape_objs["e3"].a = r_minor - (r_major - r_c) + t_scrape_i + t_fw_i + t_blanket_o - shape_objs["e3"].b = h_plasma + t_scrape_i + t_fw_i - shape_objs["e3"].c = r_c - - # Ellipse 4: inboard first wall inner edge - shape_objs["e4"] = ProcessEllipse(x, y, z) - shape_objs["e4"].a = r_minor - (r_major - r_c) + t_scrape_i - shape_objs["e4"].b = h_plasma + t_scrape_i - shape_objs["e4"].c = r_c - - # Ellipse 5: outboard first wall inner edge - shape_objs["e5"] = ProcessEllipse(x, y, z) - shape_objs["e5"].a = (r_major - r_c) + r_minor + t_scrape_o - shape_objs["e5"].b = h_plasma + t_scrape_o - shape_objs["e5"].c = r_c - - # Ellipse 6: outboard blanket inner edge - shape_objs["e6"] = ProcessEllipse(x, y, z) - shape_objs["e6"].a = (r_major - r_c) + r_minor + t_scrape_o + t_fw_o - shape_objs["e6"].b = h_plasma + t_scrape_o + t_fw_o - shape_objs["e6"].c = r_c - - # Ellipse 7: outboard shield inner edge - shape_objs["e7"] = ProcessEllipse(x, y, z) - shape_objs["e7"].a = (r_major - r_c) + r_minor + t_scrape_o + t_fw_o + t_blanket_o - shape_objs["e7"].b = h_plasma + t_scrape_o + t_fw_o + 
t_blanket_o - shape_objs["e7"].c = r_c - - # Ellipse 8: outboard shield outer edge - shape_objs["e8"] = ProcessEllipse(x, y, z) - shape_objs["e8"].a = ( - (r_major - r_c) + r_minor + t_scrape_o + t_fw_o + t_blanket_o + t_shield_o - ) - shape_objs["e8"].b = h_plasma + t_scrape_o + t_fw_o + t_blanket_o + t_shield_o - shape_objs["e8"].c = r_c - - # TF arc 1 - c_x = mf_data.data["xctfc(1)"].get_scan(-1) - c_y = mf_data.data["yctfc(1)"].get_scan(-1) - x_1 = mf_data.data["xarc(1)"].get_scan(-1) - y_1 = mf_data.data["yarc(1)"].get_scan(-1) - x_2 = mf_data.data["xarc(2)"].get_scan(-1) - y_2 = mf_data.data["yarc(2)"].get_scan(-1) - shape_objs["a1"] = ProcessTFArcs(c_x, c_y, x_1, y_1, x_2, y_2) - - # TF arc 2 - c_x = mf_data.data["xctfc(2)"].get_scan(-1) - c_y = mf_data.data["yctfc(2)"].get_scan(-1) - x_1 = mf_data.data["xarc(2)"].get_scan(-1) - y_1 = mf_data.data["yarc(2)"].get_scan(-1) - x_2 = mf_data.data["xarc(3)"].get_scan(-1) - y_2 = mf_data.data["yarc(3)"].get_scan(-1) - shape_objs["a2"] = ProcessTFArcs(c_x, c_y, x_1, y_1, x_2, y_2) - - # TF arc 3 - c_x = mf_data.data["xctfc(3)"].get_scan(-1) - c_y = mf_data.data["yctfc(3)"].get_scan(-1) - x_1 = mf_data.data["xarc(3)"].get_scan(-1) - y_1 = mf_data.data["yarc(3)"].get_scan(-1) - x_2 = mf_data.data["xarc(4)"].get_scan(-1) - y_2 = mf_data.data["yarc(4)"].get_scan(-1) - shape_objs["a3"] = ProcessTFArcs(c_x, c_y, x_1, y_1, x_2, y_2) - - # TF arc 4 - c_x = mf_data.data["xctfc(4)"].get_scan(-1) - c_y = mf_data.data["yctfc(4)"].get_scan(-1) - x_1 = mf_data.data["xarc(4)"].get_scan(-1) - y_1 = mf_data.data["yarc(4)"].get_scan(-1) - x_2 = mf_data.data["xarc(5)"].get_scan(-1) - y_2 = mf_data.data["yarc(5)"].get_scan(-1) - shape_objs["a4"] = ProcessTFArcs(c_x, c_y, x_1, y_1, x_2, y_2) - - # Extra information - # TF coil inner leg thickness - name = "TF Coil inner leg thickness (m)" - value = t_tf - shape_objs["i1"] = InfoHolder(name, value) - - # TF coil inner leg thickness - name = "TF Coil outer leg thickness (m)" - value = 
mf_data.data["tfthko"].get_scan(-1) - shape_objs["i2"] = InfoHolder(name, value) - - # TF coil inner plasma facing case thickness - name = "TF Coil inner leg plasma facing case thickness (m)" - value = mf_data.data["casthi"].get_scan(-1) - shape_objs["i3"] = InfoHolder(name, value) - - # TF coil inner case thickness - name = "TF Coil inner case thickness (m)" - value = mf_data.data["thkcas"].get_scan(-1) - shape_objs["i4"] = InfoHolder(name, value) - - # TF coil winding pack thickness - name = "TF Coil winding pack thickness (m)" - value = mf_data.data["dr_tf_wp"].get_scan(-1) - shape_objs["i5"] = InfoHolder(name, value) - - return shape_objs - - -def write_shapes_to_file(shape_data, filename): - """Function to write the ellipse data to a MCNP file.""" - output_file = open(filename, "w") - output_file.write("TZ x y z a b c\n") - for shape in shape_data: - if "e" in shape: - dat = shape_data[shape] - x = dat.x - y = dat.y - z = dat.z - a = dat.a - b = dat.b - c = dat.c - line = "TZ %.3f %.3f %.3f %.3f %.3f %.3f\n" % (x, y, z, a, b, c) - output_file.write(line) - output_file.write("\n") - - output_file.write("CZ x\n") - for shape in shape_data: - if "c" in shape: - dat = shape_data[shape] - dx = dat.dx - line = "CZ %.3f\n" % dx - output_file.write(line) - output_file.write("\n") - - output_file.write("PZ y\n") - for shape in shape_data: - if "p" in shape: - dat = shape_data[shape] - dy = dat.dy - line = "PZ %.3f\n" % dy - output_file.write(line) - - output_file.write("\n") - - output_file.write("AZ c_x c_y x_1 y_1 x_2 y_2\n") - for shape in shape_data: - if "a" in shape: - dat = shape_data[shape] - c_x = dat.c_x - c_y = dat.c_y - x_1 = dat.x_1 - y_1 = dat.y_1 - x_2 = dat.x_2 - y_2 = dat.y_2 - line = "AZ %.3f %.3f %.3f %.3f %.3f %.3f\n" % (c_x, c_y, x_1, y_1, x_2, y_2) - output_file.write(line) - output_file.write("\n") - - output_file.write("Info y\n") - for shape in shape_data: - if "i" in shape: - dat = shape_data[shape] - name = dat.name - value = dat.value - line 
= "%s \t=\t %.3f\n" % (name, value) - output_file.write(line) - - output_file.close() - - -if __name__ == "__main__": - - # Setup command line arguments - parser = argparse.ArgumentParser( - description="Process MFILE.DAT into " "PROCESS.MCNP file." - ) - - parser.add_argument( - "-f", - metavar="f", - type=str, - default="MFILE.DAT", - help="File to read as MFILE.DAT", - ) - - parser.add_argument( - "-o", - metavar="o", - type=str, - default="PROCESS.MCNP", - help="File to write as PROCESS.MCNP", - ) - - parser.add_argument("--ctf", help="True/False flag for CTF", action="store_true") - - args = parser.parse_args() - - main(args) diff --git a/utilities/mfile-to-json.py b/utilities/mfile-to-json.py deleted file mode 100755 index 884c4428..00000000 --- a/utilities/mfile-to-json.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env python -""" - -Script to dump whole MFILE to file. - -J. Morris -UKAEA -27.02.20 - -""" - -import argparse -from process.io.mfile import MFile - -RADIAL_BUILD = [ - "bore", - "ohcth", - "precomp", - "gapoh", - "tfcth", - "tftsgap", - "thshield_ib", - "gapds", - "d_vv_in", - "shldith", - "vvblgap", - "blnkith", - "fwith", - "scrapli", - "rminor", - "rminor", - "scraplo", - "fwoth", - "blnkoth", - "vvblgap", - "shldoth", - "d_vv_out", - "gapsto", - "thshield_ob", - "tftsgap", - "tfthko", -] - -# Single-null vertical build -VERTICAL_BUILD_SN = [ - "tfcth", - "tftsgap", - "thshield_vb", - "vgap2", - "d_vv_bot", - "shldlth", - "divfix", - "vgap", - "rminor*kappa", - "rminor*kappa", - "vgaptop", - "fwtth", - "blnktth", - "vvblgap", - "shldtth", - "d_vv_top", - "vgap2", - "thshield_vb", - "tftsgap", - "tfcth", -] - -# Double-null vertical build -VERTICAL_BUILD_DN = [ - "tfcth", - "tftsgap", - "thshield_vb", - "vgap2", - "d_vv_bot", - "shldlth", - "divfix", - "vgap", - "rminor*kappa", - "rminor*kappa", - "vgap", - "divfix", - "shldlth", - "d_vv_bot", - "vgap2", - "thshield_vb", - "tftsgap", - "tfcth", -] - -if __name__ == "__main__": - - # Setup 
command line arguments - parser = argparse.ArgumentParser( - description="Produces a JSON file form an MFILE. " - "There are built in options for certain variable sets, by default " - "exports entire MFILE by default use help for other options" - "For info contact james.morris2@ukaea.uk." - ) - - parser.add_argument( - "-f", metavar="filename", type=str, default="", help="specify MFILE file path" - ) - - parser.add_argument( - "-n", type=int, help="Which scan to plot? (-1=last, 0=all)", default=0 - ) - - parser.add_argument( - "--radial_build", help="export radial build", action="store_true" - ) - - parser.add_argument( - "--vertical_build", help="export vertical build", action="store_true" - ) - - parser.add_argument( - "--all_build", help="export radial and vertical build", action="store_true" - ) - - parser.add_argument( - "--verbose", help="export more info with variables", action="store_true" - ) - - args = parser.parse_args() - - if args.f == "": - parser.print_help() - parser.exit(1) - - process_mfile = MFile(filename=args.f) - i_single_null = process_mfile.data["i_single_null"].get_scan(-1) - - if args.radial_build: - process_mfile.write_to_json( - keys_to_write=RADIAL_BUILD, scan=args.n, verbose=args.verbose - ) - elif args.vertical_build: - if i_single_null == 1: - process_mfile.write_to_json( - keys_to_write=VERTICAL_BUILD_SN, scan=args.n, verbose=args.verbose - ) - else: - process_mfile.write_to_json( - keys_to_write=VERTICAL_BUILD_DN, scan=args.n, verbose=args.verbose - ) - elif args.all_build: - if i_single_null == 1: - process_mfile.write_to_json( - keys_to_write=RADIAL_BUILD + VERTICAL_BUILD_SN, - scan=args.n, - verbose=args.verbose, - ) - else: - process_mfile.write_to_json( - keys_to_write=RADIAL_BUILD + VERTICAL_BUILD_DN, - scan=args.n, - verbose=args.verbose, - ) - else: - process_mfile.write_to_json(scan=args.n, verbose=args.verbose) diff --git a/utilities/morris_method_conf.json b/utilities/morris_method_conf.json deleted file mode 100644 
index b005c318..00000000 --- a/utilities/morris_method_conf.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "bounds": [ - [ - 1.1, - 1.3 - ], - [ - 1.0, - 1.2 - ], - [ - 0.45, - 0.75 - ], - [ - 0.085, - 0.115 - ], - [ - 1e-05, - 0.0001 - ], - [ - 8.7, - 9.7 - ], - [ - 0.75, - 1.25 - ], - [ - 0.985, - 1.015 - ], - [ - 2.0, - 3.5 - ], - [ - 1.83, - 1.86 - ], - [ - 0.3, - 0.5 - ], - [ - 0.5, - 5.0 - ], - [ - 0.36, - 0.4 - ], - [ - 0.75, - 0.95 - ] - ], - "names": [ - "boundu(9)", - "hfact", - "coreradius", - "fimp(2)", - "fimp(14)", - "psepbqarmax", - "boundl(103)", - "cboot", - "peakfactrad", - "kappa", - "etaech", - "feffcd", - "etath", - "etaiso" - ], - "num_vars": 14 -} diff --git a/utilities/ndscan.py b/utilities/ndscan.py deleted file mode 100755 index 9f1c605b..00000000 --- a/utilities/ndscan.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python -""" -Code to run multi-dimensional parameter scans using PROCESS. - -Author: S. Torrisi (storrisi@u.rochester.edu) - -Date: August 2014 - -Input files: -ndscan.json (configuration file, per default in working directory) - -""" - - -from process_io_lib.process_config import NdScanConfig -import argparse - -if __name__ == "__main__": - ############################################################ - # Usage - - PARSER = argparse.ArgumentParser( - description="Program to run a multi-\ - dimensional parameter scan using PROCESS." - ) - - PARSER.add_argument( - "-f", - "--configfile", - default="ndscan.json", - help="configuration file, default = ndscan.json", - ) - - ARGS = PARSER.parse_args() - - ############################################################ - # main program - - NDSCANNER = NdScanConfig(ARGS.configfile) - NDSCANNER.start_scan() diff --git a/utilities/ndscan_and_package.py b/utilities/ndscan_and_package.py deleted file mode 100755 index 00985dbb..00000000 --- a/utilities/ndscan_and_package.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -""" -Code to run multi-dimensional parameter scans using PROCESS. 
- -Author: S. Torrisi (storrisi@u.rochester.edu) - -Date: August 2014 - -Input files: -ndscan.json (configuration file, per default in working directory) - -""" - - -from process_io_lib.process_config import NdScanConfig -import process_io_lib.NCDFfromMFILE as NC -import argparse - -if __name__ == "__main__": - ############################################################ - # Usage - - PARSER = argparse.ArgumentParser( - description="Program to run a multi-\ - dimensional parameter scan using PROCESS." - ) - - PARSER.add_argument( - "-f", - "--configfile", - default="ndscan.json", - help="configuration file, default = ndscan.json", - ) - - ARGS = PARSER.parse_args() - - ############################################################ - # main program - - NDSCANNER = NdScanConfig(ARGS.configfile) - NDSCANNER.start_scan() - - NCONVERTER = NC.NCDFconverter(ARGS.configfile) - NCONVERTER.convert_mfilelibrary() diff --git a/utilities/ndscan_package_only.py b/utilities/ndscan_package_only.py deleted file mode 100755 index f2617054..00000000 --- a/utilities/ndscan_package_only.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python -""" -Code to run multi-dimensional parameter scans using PROCESS. - -Author: S. Torrisi (storrisi@u.rochester.edu) - -Date: August 2014 - -Input files: -ndscan.json (configuration file, per default in working directory) - -Outputfiles: - - -Compatible with PROCESS version 382 - -""" - -import process_io_lib.NCDFfromMFILE as NCD -import argparse - -if __name__ == "__main__": - ############################################################ - # Usage - - PARSER = argparse.ArgumentParser( - description="Program to create a netcdf\ - file from a multi-dimensional parameter scan." 
- ) - - PARSER.add_argument( - "-f", - "--configfile", - default="ndscan.json", - help="configuration file, default = ndscan.json", - ) - - ARGS = PARSER.parse_args() - - ############################################################ - # main program - - NCONVERTER = NCD.NCDFconverter(ARGS.configfile) - NCONVERTER.convert_mfilelibrary() diff --git a/utilities/ndscan_visualisation.py b/utilities/ndscan_visualisation.py deleted file mode 100755 index 9334c54b..00000000 --- a/utilities/ndscan_visualisation.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python -""" -Code to visualise multi-dimensional parameter scans. - -Author: S. Torrisi (storrisi@u.rochester.edu) - -Date: August 2014 - -Input files: -NdscanOutput.nc (netcdf file, per default in working directory) - -""" - - -from process_io_lib.ndscan_vis import VisUtility -import argparse - - -if __name__ == "__main__": - ############################################################ - # Usage - - PARSER = argparse.ArgumentParser( - description="Program to visualise a multi-\ - dimensional parameter scan." - ) - - PARSER.add_argument( - "-f", - "--netcdffile", - default="NdscanOutput.nc", - help="netcdf file, default = NdscanOutput.nc", - ) - - ARGS = PARSER.parse_args() - - ############################################################ - # main program - VISUTIL = VisUtility(ARGS.netcdffile) - VISUTIL.main() diff --git a/utilities/output_data.py b/utilities/output_data.py deleted file mode 100755 index 4039d2dd..00000000 --- a/utilities/output_data.py +++ /dev/null @@ -1,319 +0,0 @@ -#! /usr/bin/env python -""" -Outputs a file with set of data very similar to plot proc.py, -but to a comma-delimited format for inclusion in spreadsheets. 
-Input: MFILE.DAT -Output: process/_summary.txt (or as specified by user) -Optional arguments: - # change the input file name - python plot_proc.py -f MFILE.DAT - # change the output file name - python plot_proc.py -o out.txt -Richard Kemp, 08/10/2014, CCFE -Based on plot_proc.py by James Morris -Latest update 07/08/2015 MDK -Revisions - q95, not q and q_edge - "alphat" and "alphan" peaking factors no longer apply with the pedestal profile model -""" - -import argparse -import process.io.mfile as mf -from dict_tools import proc_dict - - -def write_data(data, mfile_data, f, scan): - """Function to write output to file. - - Arguments: - data --> list of data to write - m_file_data --> MFILE.DAT data to read - f --> output file reference - scan --> scan to read from MFILE.DAT - - """ - for i in range(len(data)): - if isinstance(data[i][0], str): - if mfile_data.data[data[i][0]].exists: - dat = mfile_data.data[data[i][0]].get_scan(scan) - if isinstance(dat, str): - value = dat - else: - value = "%.4g" % mfile_data.data[data[i][0]].get_scan(scan) - if "alpha" in data[i][0]: - value = str(float(value) + 1.0) - if isinstance(dat, str): - f.write('"' + data[i][1] + '", "' + value + '"\n') - else: - f.write('"' + data[i][1] + '", ' + value + "\n") - else: - mfile_data.data[data[i][0]].get_scan(-1) - f.write("value missing!") - else: - dat = data[i][0] - if isinstance(dat, str): - value = dat - else: - value = "%.4g" % data[i][0] - f.write('"' + data[i][1] + '", ' + value + "\n") - - -def main(mfile_data, output_file, scan=-1): - """Function to output summary data for insertion into spreadsheet. 
- - Arguments: - m_file_data --> MFILE.DAT data to read - output_file --> file to write to - scan --> scan to read from MFILE.DAT - - """ - - GENERAL = [ - ("procver", "PROCESS_Version"), - ("date", "Date:"), - ("time", "Time:"), - ("username", "User:"), - ] - - in_blanket_thk = mfile_data.data["shldith"].get_scan(scan) + mfile_data.data[ - "blnkith" - ].get_scan(scan) - out_blanket_thk = mfile_data.data["shldoth"].get_scan(scan) + mfile_data.data[ - "blnkoth" - ].get_scan(scan) - - GEOMETRY = [ - ("rmajor", "R_0"), - ("rminor", "a"), - ("aspect", "A"), - ("kappa95", "kappa_95"), - ("triang95", "delta_95"), - ("sarea", "Surface area"), - ("vol", "Plasma volume"), - ("n_tf", "No. of TF coils"), - (in_blanket_thk, "i/b blkt/shld"), - (out_blanket_thk, "o/b blkt/shld"), - ("powfmw", "Fusion power"), - ] - - nong = mfile_data.data["dnla"].get_scan(scan) / mfile_data.data[ - "dlimit(7)" - ].get_scan(scan) - dnz = mfile_data.data["dnz"].get_scan(scan) / mfile_data.data["dene"].get_scan(scan) - tepeak = mfile_data.data["te0"].get_scan(scan) / mfile_data.data["te"].get_scan( - scan - ) - - nepeak = mfile_data.data["ne0"].get_scan(scan) / mfile_data.data["dene"].get_scan( - scan - ) - - PHYSICS = [ - ("plascur/1d6", "I_p"), - ("bt", "Vacuum B_T"), - ("q95", "q95"), - ("normalised thermal beta", "beta_N, thermal"), - ("normalised total beta", "beta_N, total"), - ("thermal poloidal beta", "beta_P, thermal"), - ("betap", "beta_P, total"), - ("te", ""), - ("dene", ""), - (nong, "/n_G"), - (tepeak, "T_e0/"), - (nepeak, "n_e0/"), - ("zeff", "Z_eff"), - (dnz, "n_Z/n_e"), - ("taueff", "tau_e"), - ("hfact", "H-factor"), - ("tauelaw", "Scaling law"), - ] - - pthresh = mfile_data.data["pthrmw(6)"].get_scan(scan) - gross_eff = 100.0 * ( - mfile_data.data["pgrossmw"].get_scan(scan) - / mfile_data.data["pthermmw"].get_scan(scan) - ) - net_eff = 100.0 * ( - ( - mfile_data.data["pgrossmw"].get_scan(scan) - - mfile_data.data["htpmw"].get_scan(scan) - ) - / ( - 
mfile_data.data["pthermmw"].get_scan(scan) - - mfile_data.data["htpmw"].get_scan(scan) - ) - ) - plant_eff = 100.0 * ( - mfile_data.data["pnetelmw"].get_scan(scan) - / mfile_data.data["powfmw"].get_scan(scan) - ) - - POWER_FLOWS = [ - ("wallmw", "Av. neutron wall load"), - ("pinnerzoneradmw", "inner zone radiation"), - ("psyncpv*vol", "Synchrotron radiation"), - ("pouterzoneradmw", "outer zone radiation"), - ("pnucblkt", "Nuclear heating in blanket"), - ("pnucshld", "Nuclear heating in shield"), - ("pdivt", "Psep / Pdiv"), - (pthresh, "H-mode threshold (M=2.5)"), - ("divlife", "Divertor life"), - ("pthermmw", "Thermal Power"), - (gross_eff, "Gross cycle efficiency"), - (net_eff, "Net cycle efficiency"), - ("pgrossmw", "Gross electric power"), - ("pnetelmw", "Net electric power"), - (plant_eff, "Plant efficiency Pe/Pfus"), - ] - - pinjie = mfile_data.data["pinjmw"].get_scan(scan) - pdivt = mfile_data.data["pdivt"].get_scan(scan) - pdivr = pdivt / mfile_data.data["rmajor"].get_scan(scan) - pdivnr = ( - 10.0e20 - * mfile_data.data["pdivt"].get_scan(scan) - / ( - mfile_data.data["rmajor"].get_scan(scan) - * mfile_data.data["dene"].get_scan(scan) - ) - ) - pthresh = mfile_data.data["pthrmw(6)"].get_scan(scan) - flh = pdivt / pthresh - powerht = mfile_data.data["powerht"].get_scan(scan) - psync = mfile_data.data["psyncpv*vol"].get_scan(scan) - pbrem = mfile_data.data["pinnerzoneradmw"].get_scan(scan) - hfact = mfile_data.data["hfact"].get_scan(scan) - hstar = hfact * (powerht / (powerht + psync + pbrem)) ** 0.31 - - CURRENT_DRIVE = [ - (pinjie, "SS auxiliary power"), - ("pheat", "Power for heating only"), - ("bootipf", "Bootstrap fraction"), - ("faccd", "Auxiliary fraction"), - ("facoh", "Ohmic fraction"), - ("gamnb", "NB gamma"), - ("enbeam", "NB energy"), - ("powerht", "Assumed heating power"), - (pdivr, "Pdiv/R"), - (pdivnr, "Pdiv/(n R)"), - (flh, "Pdiv/PLH"), - (hstar, "H* (non-rad. 
corr.)"), - ] - - # Number of coils (1 is OH coil) - number_of_coils = 0 - for item in mfile_data.data.keys(): - if "rpf(" in item: - number_of_coils += 1 - pf_info = [] - for i in range(1, number_of_coils): - if i % 2 != 0: - pf_info.append( - ( - mfile_data.data["ric(%s)" % str(i).zfill(2)].get_scan(scan), - "PF %s" % str(i), - ) - ) - tburn = mfile_data.data["tburn"].get_scan(scan) / 3600.0 - vssoft = mfile_data.data["vsres"].get_scan(scan) + mfile_data.data[ - "vsind" - ].get_scan(scan) - - COIL_CURRENTS = [ - (pf_info[0][0], pf_info[0][1]), - (pf_info[1][0], pf_info[1][1]), - (pf_info[2][0], pf_info[2][1]), - (vssoft, "Startup flux swing"), - ("vstot", "Available flux swing"), - (tburn, "Burn time"), - ("bmaxtf", "Peak field at conductor"), - ("iooic", r"I/I$_{\mathrm{crit}}$"), - ("tmarg", "Temperature margin"), - ("sig_tf_case", "TF case maximum shear stress (Tresca criterion)"), - ("sig_tf_wp", "TF conduit maximum shear stress (Tresca criterion)"), - ( - "sig_tf_case_max", - "Allowable maximum shear stress in the TF case (Tresca criterion)", - ), - ( - "sig_tf_wp_max", - "Allowable maximum shear stress in the TF conduit (Tresca criterion)", - ), - ] - - COSTS = [("coe", "Cost of electricity")] - - # open file for writing - f = open(output_file, "w") - - opt = proc_dict.DICT_OPTIMISATION_VARS[ - abs(int(mfile_data.data["minmax"].get_scan(scan))) - ] - f.write('"GENERAL"\n') - write_data(GENERAL, mfile_data, f, scan) - f.write('"optimising:", "' + opt + '"\n') - f.write("\n") - - f.write('"GEOMETRY"\n') - write_data(GEOMETRY, mfile_data, f, scan) - f.write("\n") - - f.write('"PHYSICS"\n') - write_data(PHYSICS, mfile_data, f, scan) - f.write("\n") - - f.write('"POWER_FLOWS"\n') - write_data(POWER_FLOWS, mfile_data, f, scan) - f.write("\n") - - f.write('"CURRENT_DRIVE"\n') - write_data(CURRENT_DRIVE, mfile_data, f, scan) - f.write("\n") - - f.write('"COIL_CURRENTS"\n') - write_data(COIL_CURRENTS, mfile_data, f, scan) - f.write("\n") - - f.write('"COSTS"\n') - 
write_data(COSTS, mfile_data, f, scan) - f.write("\n") - - # close file - f.close() - - -if __name__ == "__main__": - - # Setup command line arguments - parser = argparse.ArgumentParser( - description="Produce a single-column " - "comma-separated (.txt) summary " - "for a given scan." - "For info contact " - "rich.kemp@ccfe.ac.uk or" - "james.morris2@ccfe.ac.uk" - ) - - parser.add_argument( - "-f", - metavar="FILENAME", - type=str, - default="MFILE.DAT", - help="specify input filename", - ) - - parser.add_argument( - "-o", - metavar="OUTPUT", - type=str, - default="process_summary.txt", - help="specify output filename", - ) - - args = parser.parse_args() - - # read MFILE - m_file = mf.MFile(args.f) - - # run main - main(m_file, args.o) diff --git a/utilities/output_detailed.json b/utilities/output_detailed.json deleted file mode 100644 index 500cb008..00000000 --- a/utilities/output_detailed.json +++ /dev/null @@ -1,1489 +0,0 @@ -{ - "Times": { - "project": true, - "ro": { - "name": "Ronald Wenninger", - "email": "ronald.wenninger@efda.org", - "institute": "IPP Garching" - }, - "children": [], - "diagrams": [ - { - "filename": "documentation/figures/process_times.png", - "caption": "PROCESS timings **from** the given PROCESS run." 
- } - ], - "constraints": [ - "times" - ], - "iteration-variables": [ - "Times Variables" - ], - "input-sections": [ - "Times Variables" - ], - "inputs": [], - "output-sections": [], - "outputs": [ - "tcycle", - "tohs", - "theat", - "tburn", - "tqnch", - "tdwell" - ], - "exclusions": [] - }, - "Physics": { - "project": true, - "ro": { - "name": "Ronald Wenninger", - "email": "ronald.wenninger@efda.org", - "institute": "IPP Garching" - }, - "children": [ - "Plasma Physics", - "Divertor Physics" - ], - "diagrams": [], - "constraints": [ - "physics", - "divertor", - "divertor_kallenbach", - "impurity_radiation" - ], - "iteration-variables": [ - "Physics Variables", - "Divertor Variables", - "Divertor Kallenbach Variables", - "Impurity Radiation Module" - ], - "input-sections": [ - "Physics Variables", - "Divertor Variables", - "Divertor Kallenbach Variables", - "Impurity Radiation Module" - ], - "inputs": [], - "output-sections": [ - "Plasma Geometry", - "Current and Field", - "Beta information", - "Temperature and Density", - "Impurity Radiation", - "Plasma Profiles", - "Fusion Power", - "Radiation Power", - "Confinement", - "Plasma Volt-s Requirements", - "Divertor Physics" - ], - "outputs": [], - "exclusions": [] - }, - "Plasma Physics": { - "project": false, - "children": [ - "Plasma Geometry", - "Current and Field", - "Beta information", - "Temperature and Density", - "Impurity Radiation", - "Plasma Profiles", - "Fusion Power", - "Radiation Power", - "Confinement", - "Plasma Volt-s Requirements" - ], - "diagrams": [], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [], - "output-sections": [], - "outputs": [], - "exclusions": [] - }, - "Divertor Physics": { - "project": false, - "children": [ - "Divertor" - ], - "diagrams": [], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [], - "output-sections": [], - "outputs": [], - "exclusions": [] - }, - "Plasma Geometry": { - "project": false, - 
"children": [], - "diagrams": [], - "constraints": [ - 21, - 23, - 24 - ], - "iteration-variables": [ - 1, - 3, - 5, - 18, - 73, - 74 - ], - "input-sections": [], - "inputs": [ - "rmajor", - "rminor", - "aspect", - "kappa", - "kappa95", - "triang", - "triang95", - "q0", - "q95" - ], - "output-sections": [], - "outputs": [ - "rmajor", - "rminor", - "aspect", - "kappa", - "kappa95", - "triang", - "triang95", - "xarea", - "sarea", - "vol", - "q0", - "q95" - ], - "exclusions": [] - }, - "Current and Field": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 10, - 23, - 25 - ], - "iteration-variables": [ - 2, - 18 - ], - "input-sections": [], - "inputs": [ - "icurr", - "alphaj", - "rli", - "bt", - "q0", - "q" - ], - "output-sections": [], - "outputs": [ - "plascur/1d6", - "alphaj", - "rli", - "bvert", - "bt", - "bp", - "btot", - "q0", - "q95", - "qstar" - ], - "exclusions": [] - }, - "Beta information": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 1, - 6, - 24, - 48 - ], - "iteration-variables": [ - 5, - 8, - 36 - ], - "input-sections": [], - "inputs": [ - "beta", - "betap", - "betaft", - "betanb", - "gammaft", - "dnbeta", - "betalim" - ], - "output-sections": [], - "outputs": [ - "beta", - "betap", - "betaft", - "betanb", - "gammaft", - "dnbeta" - ], - "exclusions": [] - }, - "Temperature and Density": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 1, - 5, - 7 - ], - "iteration-variables": [ - 4, - 5, - 9, - 109, - 119 - ], - "input-sections": [], - "inputs": [ - "ralpne" - ], - "output-sections": [], - "outputs": [ - "te", - "te0", - "ti", - "ti0", - "ten", - "dene", - "ne0", - "dnla", - "dnla_gw", - "dnitot", - "deni", - "dnz", - "dnalp", - "dnprot", - "dnbeam", - "dnelimt", - "ralpne" - ], - "exclusions": [] - }, - "Impurity Radiation": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 17, - 67 - ], - "iteration-variables": [ - 28, - 102, - 112, 
- 116, - 125, - 126, - 127, - 128, - 129, - 130, - 131, - 132, - 133, - 134, - 135, - 136 - ], - "input-sections": [], - "inputs": [ - "imprad_model", - "fimp(01", - "fimp(02", - "fimp(03", - "fimp(04", - "fimp(05", - "fimp(06", - "fimp(07", - "fimp(08", - "fimp(09", - "fimp(10", - "fimp(11", - "fimp(12", - "fimp(13", - "fimp(14", - "aion" - ], - "output-sections": [], - "outputs": [ - "imprad_model", - "fimp(01", - "fimp(02", - "fimp(03", - "fimp(04", - "fimp(05", - "fimp(06", - "fimp(07", - "fimp(08", - "fimp(09", - "fimp(10", - "fimp(11", - "fimp(12", - "fimp(13", - "fimp(14", - "aion", - "zeff" - ], - "exclusions": [] - }, - "Plasma Profiles": { - "project": false, - "children": [], - "diagrams": [ - { - "filename": "documentation/figures/process_plasma_profiles.png", - "caption": "PROCESS plasma profiles." - } - ], - "constraints": [], - "iteration-variables": [ - 119 - ], - "input-sections": [], - "inputs": [ - "alphan", - "ipedestal", - "rhopedn", - "neped", - "fgwped", - "rhopedt", - "ieped", - "teped", - "tesep", - "nesep", - "fgwsep", - "alphat", - "tbeta" - ], - "output-sections": [], - "outputs": [ - "alphan", - "ipedestal", - "rhopedn", - "neped", - "fgwped", - "rhopedt", - "ieped", - "teped", - "tesep", - "nesep", - "fgwsep", - "alphat", - "tbeta" - ], - "exclusions": [] - }, - "Fusion Power": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 9, - 28 - ], - "iteration-variables": [ - 26 - ], - "input-sections": [], - "inputs": [ - "fdeut", - "ftrit", - "falpha", - "tauratio" - ], - "output-sections": [], - "outputs": [ - "fdeut", - "ftrit", - "powfmw", - "pdt", - "pdd", - "pdhe3", - "palpmw", - "palpnb", - "pneutmw", - "pchargemw", - "pohmmw", - "falpha", - "falpe", - "falpi", - "ptrimw", - "ptremw", - "pinjimw", - "pinjemw", - "tauratio", - "qfuel", - "rndfuel", - "burnup", - "bigq" - ], - "exclusions": [] - }, - "Radiation Power": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 17, - 67 - 
], - "iteration-variables": [ - 28, - 102, - 112, - 116 - ], - "input-sections": [], - "inputs": [ - "ssync", - "coreradius", - "coreradiationfraction", - "peakfactrad", - "maxradwallload" - ], - "output-sections": [], - "outputs": [ - "pbrempv*vol", - "plinepv*vol", - "psyncpv*vol", - "ssync", - "coreradius", - "coreradiationfraction", - "pinnerzoneradmw", - "pouterzoneradmw", - "pradmw", - "rad_fraction_total", - "photon_wall", - "peakfactrad", - "maxradwallload", - "peakradwallload", - "wallmw" - ], - "exclusions": [] - }, - "Confinement": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 62 - ], - "iteration-variables": [ - 110 - ], - "input-sections": [], - "inputs": [ - "hfact", - "taulimit" - ], - "output-sections": [], - "outputs": [ - "hfact", - "taueff", - "tauei", - "tauee", - "taup", - "taulimit" - ], - "exclusions": [] - }, - "Plasma Volt-s Requirements": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 12 - ], - "iteration-variables": [ - 15 - ], - "input-sections": [], - "inputs": [ - "gamma" - ], - "output-sections": [], - "outputs": [ - "vsstt", - "vsind", - "gamma", - "vsres", - "vsbrn", - "bootipf", - "vburn", - "rplas", - "res_time", - "rlp", - "abs(vstot)", - "vssoft", - "vsmax" - ], - "exclusions": [] - }, - "Divertor": { - "project": false, - "ro": { - "name": "Ronald Wenninger", - "email": "ronald.wenninger@efda.org", - "institute": "IPP Garching" - }, - "children": [], - "diagrams": [], - "constraints": [ - 18, - 56, - 68 - ], - "iteration-variables": [ - 27, - 97, - 117 - ], - "input-sections": [], - "inputs": [ - "psepbqarmax", - "pseprmax" - ], - "output-sections": [], - "outputs": [ - "pdivt", - "pdivt/rmajor", - "pdivtbt/qar" - ], - "exclusions": [] - }, - "Magnets": { - "project": true, - "ro": { - "name": "Matti Coleman", - "email": "matti.coleman@euro-fusion.org", - "institute": "CCFE" - }, - "children": [ - "TF Coils", - "PF Coils", - "Central Solenoid" - ], - "diagrams": 
[], - "constraints": [ - "pf_power", - "pfcoil", - "tfcoil" - ], - "iteration-variables": [ - "Pf Power Variables", - "Pfcoil Variables", - "Tfcoil Variables" - ], - "input-sections": [ - "Pf Power Variables", - "Pfcoil Variables", - "Tfcoil Variables" - ], - "inputs": [], - "output-sections": [ - "General Coil Parameters", - "Coil Geometry", - "Stress and Forces", - "Quench Information", - "Conductor Information", - "Winding Pack", - "External Case Information", - "PF Coils", - "Central Solenoid" - ], - "outputs": [], - "exclusions": [ - "hmax", - "tfc model", - "sigvert", - "s_mises_cond", - "s_mises_case", - "leni", - "acstf", - "fcutfsu", - "1-fcutfsu", - "jscoh_bop", - "jscoh_eof", - "jstrandoh_eof", - "rjohc", - "areaoh-awpoh" - ] - }, - "TF Coils": { - "project": false, - "children": [ - "General Coil Parameters", - "Coil Geometry", - "Stress and Forces", - "Quench Information", - "Conductor Information", - "Winding Pack", - "External Case Information" - ], - "diagrams": [], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [], - "output-sections": [], - "outputs": [], - "exclusions": [] - }, - "PF Coils": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [ - "sigpfcalw", - "sigpfcf", - "fcupfsu", - "isumatpf", - "strncon_pf" - ], - "output-sections": [], - "outputs": [ - "fcupfsu", - "vstot" - ], - "exclusions": [] - }, - "Central Solenoid": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 26, - 27, - 60 - ], - "iteration-variables": [ - 16, - 37, - 38, - 39, - 41, - 42, - 106, - 122, - 123 - ], - "input-sections": [], - "inputs": [ - "isumatoh", - "bore", - "ohcth", - "oh_steel_frac", - "alstroh", - "vfohc", - "strncon_cs" - ], - "output-sections": [], - "outputs": [ - "bmaxoh0", - "bore", - "ohcth", - "oh_steel_frac", - "vfohc", - "cohbop", - "bmaxoh", - "coheof", - "gapoh", - "areaoh", - "awpoh", 
- "awpoh*(1-vfohc)", - "awpoh*vfohc", - "alstroh", - "sig_hoop", - "sig_axial", - "s_tresca_oh", - "axial_force", - "fcuohsu", - "tftmp", - "tmargoh" - ], - "exclusions": [] - }, - "General Coil Parameters": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [], - "output-sections": [], - "outputs": [ - "i_tf_sc_mat", - "jwptf", - "tfno", - "ritfc/1.d6", - "bmaxtfrp", - "ripmax", - "ripple", - "estotftgj", - "whttf/tfno" - ], - "exclusions": [] - }, - "Coil Geometry": { - "project": false, - "children": [], - "diagrams": [ - { - "filename": "documentation/figures/TFLeg.png", - "caption": "Toroidal Field coil inboard leg geometry." - } - ], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [], - "output-sections": [], - "outputs": [ - "tfcth", - "tfcth", - "tfthko", - "tficrn", - "tfocrn", - "tfleng", - "rtfcin", - "rtot", - "gapds" - ], - "exclusions": [] - }, - "Stress and Forces": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 31, - 32 - ], - "iteration-variables": [ - 48, - 49 - ], - "input-sections": [], - "inputs": [ - "alstrtf" - ], - "output-sections": [], - "outputs": [ - "vforce", - "cforce", - "alstrtf", - "s_tresca_case", - "s_tresca_cond", - "bc20m", - "tc0m" - ], - "exclusions": [] - }, - "Quench Information": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [ - 34, - 65, - 35, - 36 - ], - "iteration-variables": [ - 51, - 52, - 54, - 56 - ], - "input-sections": [], - "inputs": [ - "sigvvall", - "tmaxpro", - "tmargmin" - ], - "output-sections": [], - "outputs": [ - "taucq", - "tftmp", - "jwdgpro", - "jwptf", - "vdalw", - "vtfskv" - ], - "exclusions": [] - }, - "Conductor Information": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [], - "iteration-variables": [ - 58 - ], - "input-sections": [], - "inputs": [ - "strncon_tf" - ], - 
"output-sections": [], - "outputs": [ - "cable_width", - "thwcndut", - "thicndut", - "bmax", - "fhetot", - "jcritsc", - "jcritstr", - "jwdgop", - "jwdgcrt", - "icrit", - "iooic", - "cable_width", - "leni", - "thwcndut", - "thicndut", - "acstf" - ], - "exclusions": [] - }, - "Winding Pack": { - "project": false, - "children": [], - "diagrams": [ - { - "filename": "documentation/figures/TFWindingPack.png", - "caption": "Toroidal Field coil winding pack with dimensions" - } - ], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [ - "dhecoil", - "vftf", - "fcutfsu", - "tinstf", - "tfinsgap", - "cpttf" - ], - "output-sections": [], - "outputs": [ - "acond/ap", - "aiwp/ap", - "avwp/ap", - "awphec/ap", - "dr_tf_wp", - "wwp1", - "wwp2", - "turnstf", - "1-fcutfsu" - ], - "exclusions": [] - }, - "External Case Information": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [ - "casthi", - "thkcas", - "casths" - ], - "output-sections": [], - "outputs": [ - "acasetf", - "acasetfo" - ], - "exclusions": [] - }, - "Breeder Blanket": { - "project": true, - "ro": { - "name": "Cismondi Fabio", - "email": "fabio.cismondi@euro-fusion.org", - "institute": "KIT" - }, - "children": [], - "diagrams": [], - "constraints": [ - 52, - 55 - ], - "iteration-variables": [ - 89, - 95 - ], - "input-sections": [], - "inputs": [ - "blnkith", - "blnkoth", - "shldith", - "shldoth", - "fwith", - "fwith", - "li6enrich", - "fw_armour_thickness", - "tbrmin" - ], - "output-sections": [], - "outputs": [ - "ptfnuc", - "pnucfw", - "pnucblkt", - "pnucshld", - "pnucdiv", - "fwpressure", - "blpressure", - "fwarea", - "rdewex", - "zdewex", - "tbr" - ], - "exclusions": [] - }, - "Heating and Current Drive": { - "project": true, - "ro": { - "name": "Matti Coleman", - "email": "matti.coleman@euro-fusion.org", - "institute": "CCFE" - }, - "children": [], - "diagrams": [], - "diagrams": [], 
- "constraints": [ - 14, - 20, - 30, - 37, - 40, - 59 - ], - "iteration-variables": [ - 7, - 11, - 19, - 33, - 40, - 44, - 46, - 47, - 105 - ], - "input-sections": [], - "inputs": [ - "iefrf", - "etaech", - "pinjalw", - "gamma_ecrh", - "enbeam", - "beamwd", - "bscfmax", - "cboot", - "etalh", - "etanbi", - "etaof", - "feffcd", - "forbitloss", - "frbeam", - "ftritbm", - "irfcd", - "nbshield", - "pheat", - "tbeamin", - "gamma" - ], - "output-sections": [], - "outputs": [ - "bigq", - "effcd", - "gamcd", - "etacd", - "faccd", - "facoh", - "bootipf", - "fvsbrnni", - "echpwr", - "echwpow", - "plascur/1d6", - "te", - "te0", - "ti", - "ti0", - "dene", - "ne0", - "rli", - "pinjmw", - "pinjwp", - "pohmmw" - ], - "exclusions": [] - }, - "Balance Of Plant": { - "project": true, - "ro": { - "name": "Test Name", - "email": "test.name@myemail.com", - "institute": "Test Lab" - }, - "children": [ - "FW Blanket", - "TF Power", - "PF Power", - "Electrical Power" - ], - "diagrams": [ - { - "filename": "documentation/figures/PowerFlow.png", - "caption": "PROCESS power balance" - } - ], - "constraints": [ - "heat_transport" - ], - "iteration-variables": [ - "Heat Transport Variables" - ], - "input-sections": [ - "Heat Transport Variables" - ], - "inputs": [], - "output-sections": [ - "FW Blanket", - "TF Power", - "PF Power", - "Electrical Power" - ], - "outputs": [], - "exclusions": [] - }, - "FW Blanket": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [ - "dp_he", - "t_in_bb", - "htpmw_fw_blkt", - "htpmw_div", - "htpmw_shld", - "emult", - "fhcd", - "fdiv", - "etath" - ], - "output-sections": [], - "outputs": [ - "fpump", - "p_plasma", - "t_in_compressor", - "htpmw", - "htpsecmw", - "pinjht", - "tfcmw", - "pthermmw" - ], - "exclusions": [] - }, - "TF Power": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - 
"inputs": [], - "output-sections": [], - "outputs": [ - "itfka", - "vtfskv", - "tchghr", - "ltfth", - "rcoils", - "tfcv", - "ntfbkr", - "ndumpr", - "r1dump", - "r1ppmw", - "r1emj", - "ttfsec", - "tfpsv", - "tfpska", - "tfckw", - "tfackw", - "rpower", - "xpower", - "tfbusl", - "rtfbus", - "vtfbus", - "tfacpd" - ], - "exclusions": [] - }, - "PF Power": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [ - "maxpoloidalpower" - ], - "output-sections": [], - "outputs": [ - "pfckts", - "spsmva", - "spfbusl", - "pfbuspwr", - "srcktpm", - "vpfskv", - "ensxpfm", - "peakpoloidalpower" - ], - "exclusions": [] - }, - "Electrical Power": { - "project": false, - "children": [], - "diagrams": [], - "constraints": [], - "iteration-variables": [], - "input-sections": [], - "inputs": [], - "output-sections": [], - "outputs": [ - "basemw", - "bdvmw", - "crypmw", - "htpmw", - "ppfmw", - "ptfmw", - "pheatingmw", - "trithtmw", - "vachtmw" - ], - "exclusions": [] - }, - "Tritium Fuelling and Vacuum": { - "project": true, - "ro": { - "name": "Cismondi Fabio", - "email": "fabio.cismondi@euro-fusion.org", - "institute": "KIT" - }, - "children": [], - "diagrams": [], - "constraints": [ - "vacuum" - ], - "iteration-variables": [ - "Vacuum Variables" - ], - "input-sections": [ - "Vacuum Variables" - ], - "inputs": [ - "rat", - "pbase", - "tdwell" - ], - "output-sections": [], - "outputs": [ - "ogas", - "s(1)", - "snet(1)", - "volume", - "pend", - "pstart", - "s(2)", - "snet(2)", - "prdiv", - "fhe", - "s(3)", - "snet(3)", - "frate", - "s(4)", - "snet(4)", - "nduct", - "d(imax)", - "l1", - "dout", - "l2", - "l3", - "pumpn" - ], - "exclusions": [] - }, - "Builds": { - "project": false, - "ro": { - "name": "Christian Bachmann", - "email": "christian.bachmann@euro-fusion.org", - "institute": "IPP Garching" - }, - "diagrams": {}, - "radial_diagram": { - "filename": 
"documentation/figures/process_radial_plot.png", - "caption": "PROCESS radial build plot **from** given PROCESS run. " - }, - "Radial Build": { - "1": { - "name": "bore", - "des": "Machine bore", - "combination": false - }, - "2": { - "name": "ohcth", - "des": "Central solenoid", - "combination": false - }, - "3": { - "name": "precomp", - "des": "CS precompression", - "combination": false - }, - "4": { - "name": "gapoh", - "des": "Gap between precomp and TF inboard", - "combination": false - }, - "5": { - "name": "tfcth", - "des": "TF coil inboard leg", - "combination": false - }, - "6": { - "name": "deltf", - "des": "Gap between TF inboard and thermal shield", - "combination": false - }, - "7": { - "name": "thshield_ib", - "des": "Inboard thermal shield", - "combination": false - }, - "8": { - "name": "gapds", - "des": "Gap between thermal shield and vacuum vessel", - "combination": false - }, - "9": { - "name": "d_vv_in + shldith", - "des": "Inboard vacuum vessel (and shielding)", - "combination": true, - "combo_type": "+" - }, - "10": { - "name": "vvblgap", - "des": "Gap between vacuum vessel and inboard blanket", - "combination": false - }, - "11": { - "name": "blnkith", - "des": "Inboard blanket", - "combination": false - }, - "12": { - "name": "fwith", - "des": "Inboard first wall", - "combination": false - }, - "13": { - "name": "scrapli", - "des": "Inboard scrape-off layer", - "combination": false - }, - "14": { - "name": "rminor", - "des": "Plasma inboard minor radius", - "combination": false - }, - "15": { - "name": "rminor", - "des": "Plasma outboard minor radius", - "combination": false - }, - "16": { - "name": "scraplo", - "des": "Outboard scrape-off layer", - "combination": false - }, - "17": { - "name": "fwoth", - "des": "Outboard first wall", - "combination": false - }, - "18": { - "name": "blnkoth", - "des": "Outboard blanket", - "combination": false - }, - "19": { - "name": "vvblgap", - "des": "Gap between vacuum vessel and outboard blanket", - 
"combination": false - }, - "20": { - "name": "d_vv_out + shldoth", - "des": "Outboard vacuum vessel and shielding", - "combination": true, - "combo_type": "+" - }, - "21": { - "name": "gapsto", - "des": "Gap between outboard vacuum vessel and thermal shield", - "combination": false - }, - "22": { - "name": "thshield_ob", - "des": "Outboard thermal shield", - "combination": false - }, - "23": { - "name": "tftsgap", - "des": "Gap between outboard thermal shield and TF outboard", - "combination": false - }, - "24": { - "name": "tfthko", - "des": "TF coil outboard leg", - "combination": false - } - }, - "Vertical Build": { - "single": { - "1": { - "name": "tfcth", - "des": "Top TF Coil", - "combination": false - }, - "2": { - "name": "tftsgap", - "des": "Gap between thermal shield and top TF coil", - "combination": false - }, - "3": { - "name": "thshield_vb", - "des": "Upper thermal shield", - "combination": false - }, - "4": { - "name": "vgap2", - "des": "Gap between thermal shield and top vacuum vessel", - "combination": false - }, - "5": { - "name": "d_vv_top + shldtth", - "des": "Top vacuum vessel (and shielding)", - "combination": true, - "combo_type": "+" - }, - "6": { - "name": "vvblgap", - "des": "Gap between top vacuum vessel and top blanket", - "combination": false - }, - "7": { - "name": "blnktth", - "des": "Top blanket thickness", - "combination": false - }, - "8": { - "name": "fwtth", - "des": "Top first wall", - "combination": false - }, - "9": { - "name": "vgaptop", - "des": "Top scrape-off layer", - "combination": false - }, - "10": { - "name": "rminor * kappa", - "des": "Plasma half-height (top)", - "combination": true, - "combo_type": "*" - }, - "11": { - "name": "rminor * kappa", - "des": "Plasma half-height (bottom)", - "combination": true, - "combo_type": "*" - }, - "12": { - "name": "vgap", - "des": "Lower scrape-off layer", - "combination": false - }, - "13": { - "name": "divfix", - "des": "Divertor structure", - "combination": false - }, - 
"14": { - "name": "d_vv_bot + shldlth", - "des": "Lower vacuum vessel (and shielding)", - "combination": true, - "combo_type": "+" - }, - "15": { - "name": "vgap2", - "des": "Gap between lower vacuum vessel and thermal shield", - "combination": false - }, - "16": { - "name": "thshield_vb", - "des": "Lower thermal shield", - "combination": false - }, - "17": { - "name": "tftsgap", - "des": "Gap between lower thermal shield and lower TF coil", - "combination": false - }, - "18": { - "name": "tfcth", - "des": "Lower TF coil", - "combination": false - } - }, - "double": { - "1": { - "name": "test", - "des": "test", - "combination": false - } - } - } - } -} diff --git a/utilities/output_detailed.py b/utilities/output_detailed.py deleted file mode 100644 index c25f083b..00000000 --- a/utilities/output_detailed.py +++ /dev/null @@ -1,989 +0,0 @@ -""" - - Create PROCESS output summary in html - - James Morris 06/06/2017 - CCFE - -""" - -# Third party libraries -import os -import sys -import json -import argparse -from collections import OrderedDict -from grip import export - -# PROCESS libraries -from process.io.in_dat import InDat -from process.io.mfile import MFile -from process_io_lib.process_plots import ( - plot_pulse_timings, - radial_bar_plot, - plasma_profiles_plot, -) - -try: - import process_io_lib.process_dicts as proc_dict -except ImportError: - print( - "The Python dictionaries have not yet been created. Please run", - " 'make dicts'!", - ) - exit() - -# Constants -EXCLUSIONS = ["ixc", "icc"] -MODULES = dict() -ICC_FULL = proc_dict.DICT_ICC_FULL -ICC_VARS = proc_dict.DICT_ICC_VARS -IXC_FULL = proc_dict.DICT_IXC_FULL -IXC_DEF = proc_dict.DICT_IXC_DEFAULT -DES_FIMP = proc_dict.DICT_FIMP -MODULES_FULL = proc_dict.DICT_MODULE -DEFAULTS = proc_dict.DICT_DEFAULT -DESCRIPTIONS = proc_dict.DICT_DESCRIPTIONS -FOM = proc_dict.DICT_OPTIMISATION_VARS - - -# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
- -# Markdown - - -def bold(x): - return "**" + str(x) + "**" - - -def unbold(x): - return x.replace("*", "") - - -def code(x): - return "`" + str(x) + "`" - - -def bold_italics(x): - return "**_" + str(x) + "_**" - - -def output_line(x): - OUTFILE.write(x + "\n\n") - - -def heading(x, y): - OUTFILE.write("{0} {1}\n\n".format("#" * x, y)) - - -def output_below_table_comment(x, y, z): - OUTFILE.write("> **{0}** : **{1}** : {2}\n\n".format(x, y, z)) - - -def output_newline(): - OUTFILE.write("\n\n") - - -def heading_comment(x): - if x in COMMENTS: - OUTFILE.write("> {0}\n\n".format(" ".join(COMMENTS[x]))) - - -def return_to_top(): - OUTFILE.write("[:arrow_up:](#contents)\n\n") - - -def output_pic(x, y, z): - OUTFILE.write("![{0}]({1} '{2}')\n\n".format(x, y, z)) - - -def bold_info(x, y): - OUTFILE.write("{0}: {1}\n\n".format(bold(x), y)) - - -def content_heading(x): - tag = x.lower().replace(" ", "-").replace(":", "") - x = x.replace("-", " ") - OUTFILE.write("[{0}](#{1})\n\n".format(x, tag)) - - -def content_subheading(x): - tag = x.lower().replace(" ", "-").replace(":", "") - OUTFILE.write("{0}[{1}](#{2})\n\n".format(" " * 8, x, tag)) - - -def content_subsubheading(x): - tag = x.lower().replace(" ", "-").replace(":", "") - OUTFILE.write("{0}[{1}](#{2})\n\n".format(" " * 16, x, tag)) - - -def section_ro(section_name): - """ - print mailto: for section RO - """ - ro_name = DATA[section_name]["ro"]["name"] - ro_email = DATA[section_name]["ro"]["email"] - ro_lab = DATA[section_name]["ro"]["institute"] - OUTFILE.write( - "PMU RO: [{0}](mailto:{1}) ({2})\n\n".format(ro_name, ro_email, ro_lab) - ) - - -def table_heading(headings): - header_line = "|" - under_line = "|" - for item in headings: - header_line += " {0} |".format(item) - under_line += " --- |" - OUTFILE.write(header_line + "\n") - OUTFILE.write(under_line + "\n") - - -def table_line(items, form): - item_line = "|" - item_format = "{0:" + "{0}".format(form) + "}" - for item in items: - if isinstance(item, 
float): - item_line += item_format.format(item) + " |" - elif isinstance(item, int): - item_line += " {0} |".format(item) - else: - try: - eval(item) - item_line += item_format.format(item) + " |" - except (NameError, ValueError, SyntaxError, TypeError): - item_line += " {0} |".format(item) - OUTFILE.write(item_line + "\n") - - -# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - -def get_itvar_values(): - """ - Get iteration variable values from MFILE - """ - itvar_vals = dict() - - for item in MFILE_LIST: - if item[:5] == "itvar": - name = MFILE.data[item].var_description - value = MFILE.data[item].get_scan(-1) - itvar_vals[name] = value - return itvar_vals - - -def get_indat_comments(): - """ - Get IN.DAT #tag comments - """ - lines = open(COMMAND_ARGS.f).readlines() - - for line in lines: - if line[:2] == "*#": - try: - split_line = line.split(":", 1) - key = split_line[0][2:].replace(" ", "") - value = split_line[1].strip(" ").strip("\n") - COMMENTS.setdefault(key, []).append(value) - except Exception: - print("Error with comment for line in IN.DAT :: {0}".format(line)) - - -def constraint_comments(ky, iccs): - """ - Display constraint comments for this module - """ - - for icc in iccs: - con_key = "constraint-{0}".format(icc) - if con_key in COMMENTS.keys(): - con_name = ICC_FULL[str(icc)]["name"] - comment = " ".join(COMMENTS[con_key]) - if any(c.isalpha() for c in comment): - output_below_table_comment(icc, con_name, comment) - - -def iteration_comments(ky, ixcs): - """ - Display iteration comments for this module - """ - - for ixc in ixcs: - itv_key = "iteration-variable-{0}".format(ixc) - if itv_key in COMMENTS.keys(): - itv_name = IXC_FULL[str(ixc)]["name"] - comment = " ".join(COMMENTS[itv_key]) - if len(comment) > 1: - output_below_table_comment(ixc, itv_name, comment) - - -def output_contents(): - """ - Print contents page - """ - - bold_info("Username", MFILE.data["username"].get_scan(-1)) - bold_info("Date", 
MFILE.data["date"].get_scan(-1)) - bold_info("Time", MFILE.data["time"].get_scan(-1)) - bold_info("PROCESS version", MFILE.data["procver"].get_scan(-1)) - bold_info("PROCESS Tag Number", MFILE.data["tagno"].get_scan(-1)) - bold_info("Run description", MFILE.data["runtitle"].get_scan(-1)) - - figure_of_merit = FOM[MFILE.data["minmax"].get_scan(-1)] - if MFILE.data["minmax"].get_scan(-1) < 0: - fom_type = "maximise" - else: - fom_type = "minimise" - bold_info("Figure of merit", "{0} {1}".format(fom_type, figure_of_merit.lower())) - - bold_info( - "PROCESS references", - "[website](http://www.ccfe.ac.uk/powerplants.aspx), [physics paper](http://www.sciencedirect.com/science/article/pii/S0920379614005961), [engineering paper](http://www.sciencedirect.com/science/article/pii/S0920379616300072)", - ) - bold_info( - "Contacts", - "[James Morris](mailto:james.morris2@ukaea.uk), [Hanni Lux](mailto:hanni.lux@ukaea.uk), [Michael Kovari](mailto:michael.kovari@ukaea.uk)", - ) - - # Top level comment - output_line(bold("IN.DAT Comment")) - heading_comment("header-title") - - heading(1, "Contents") - - content_heading("Output PDF") - - content_heading("Builds") - content_subheading("Radial Build") - content_subheading("Vertical Build") - - for k, v in DATA.items(): - if v["project"]: - content_heading(k) - - if len(v["children"]) != 0: - for child in v["children"]: - content_subheading(child) - - if len(DATA[child]["children"]) != 0: - for child2 in DATA[child]["children"]: - content_subsubheading(child2) - - -def include_diagram(filename, caption): - """ - Put requested image in output - """ - global CAPTIONNUMBER - - try: - output_pic(caption, filename, caption) - output_line("Figure {0}: {1}".format(CAPTIONNUMBER, caption)) - CAPTIONNUMBER += 1 - except Exception: - print("Cannot find image {0}".format(filename)) - pass - - -def output_constraints(k, numbers=False): - """ - Output constraints list of constraint mods - """ - - MODULE_ICC_LIST = list() - if numbers: - cons = 
DATA[k]["constraints"] - for c in cons: - if c in CONSTRAINTS: - MODULE_ICC_LIST.append(c) - # else: - # print("Requested constraint {0} not a constraint for this MFILE".format(c)) - else: - MODULE_ICC_LIST = sum( - [MODULES[key] for key in MODULES.keys() if key in DATA[k]["constraints"]], - [], - ) - - if len(MODULE_ICC_LIST) == 0: - return - - heading(4, "Constraints") - - table_heading( - [ - "Constraint", - "Description", - "Limit Name", - "Limit", - "Value", - "F-Value Name", - "F-Value Value", - ] - ) - - for item in MODULE_ICC_LIST: - - con_name = ICC_FULL[str(item)]["name"] - - if ICC_VARS[str(item)] != "consistency" and ICC_VARS[str(item)] != "empty": - if "f" in ICC_VARS[str(item)].keys(): - con_f_val_name = ICC_VARS[str(item)]["f"] - if con_f_val_name not in MFILE_LIST: - if con_f_val_name in IT_VAR_VALUES.keys(): - con_f_val = IT_VAR_VALUES[con_f_val_name] - else: - if con_f_val_name in INPUTS_LIST: - con_f_val = INFILE.data[con_f_val_name].value - else: - con_f_val = IXC_DEF[con_f_val_name] - con_f_val = bold(con_f_val) - else: - con_f_val = MFILE.data[con_f_val_name].get_scan(-1) - - con_lim_name = ICC_VARS[str(item)]["v"] - - if con_lim_name in MFILE.data.keys(): - con_lim = MFILE.data[con_lim_name].get_scan(-1) - else: - if con_lim_name in DEFAULTS.keys(): - con_lim = DEFAULTS[con_lim_name] - else: - con_lim = bold("calculated") - else: - con_f_val_name = "-" - con_lim_name = "consistency" - con_f_val = "-" - con_lim = "-" - - if con_f_val != "-" and con_lim != "-" and con_lim != bold("calculated"): - if isinstance(con_lim, str): - con_lim = unbold(con_lim) - if isinstance(con_f_val, str): - con_f_val = unbold(con_f_val) - actual_value = "{0:.2e}".format(float(con_lim) * float(con_f_val)) - con_lim = "{0:.2e}".format(float(con_lim)) - con_f_val = "{0:.2e}".format(float(con_f_val)) - else: - actual_value = "-" - - table_line( - [ - str(item), - con_name, - con_lim_name, - con_lim, - actual_value, - con_f_val_name, - con_f_val, - ], - ".4g", - ) - - 
constraint_comments(k, MODULE_ICC_LIST) - - -def output_itvars(k, numbers=False): - """ - Output iteration variables for section k - """ - - if numbers: - MODULES_IXC_NUMBERS_LIST = list() - itvars_list = DATA[k]["iteration-variables"] - for it in itvars_list: - if it in IT_VARS: - MODULES_IXC_NUMBERS_LIST.append(it) - # else: - # print("Requested iteration variable {0} not iteration variable for this MFILE".format(it)) - MODULES_IXC_NAMES_LIST = [ - IXC_FULL[str(ixc)]["name"] for ixc in MODULES_IXC_NUMBERS_LIST - ] - - else: - mod_names = DATA[k]["iteration-variables"] - MODULES_IXC_NAMES_LIST = [ - IXC_FULL[str(ixc)]["name"] - for ixc in IT_VARS - if any( - IXC_FULL[str(ixc)]["name"] in MODULES_FULL[mod_name] - for mod_name in mod_names - ) - ] - - MODULES_IXC_NUMBERS_LIST = [ - ixc - for ixc in IT_VARS - if any( - IXC_FULL[str(ixc)]["name"] in MODULES_FULL[mod_name] - for mod_name in mod_names - ) - ] - - if len(MODULES_IXC_NAMES_LIST) == 0: - return - - heading(4, "Iteration Variables") - output_line("* Values in **bold** are **not default** but user inputs.") - table_heading( - [ - "No.", - "Name", - "Final Value", - "Description", - "Starting Value", - "Lower Bound", - "Upper Bound", - ] - ) - - for item in IT_VARS: - - item_name = IXC_FULL[str(item)]["name"] - if "fimp(" in item_name: - item_description = DES_FIMP[item_name] - else: - item_description = DESCRIPTIONS[item_name].split("\n")[0] - - if item_name in IT_VAR_VALUES.keys(): - item_value = IT_VAR_VALUES[item_name] - else: - item_value = MFILE.data[item_name].get_scan(-1) - - if item_name in INPUTS_LIST: - starting_value = INFILE.data[item_name].value - starting_value = bold(starting_value) - else: - if "fimp(" in item_name: - starting_value = item_value - fimp_index = int(item_name.split("(")[-1].split(")")[0]) - starting_value = INFILE.data["fimp"].value[fimp_index - 1] - else: - starting_value = IXC_DEF[item_name] - - if item_name in MODULES_IXC_NAMES_LIST: - - if str(item) in BOUNDS.keys(): - if 
"l" in BOUNDS[str(item)].keys(): - low_bound = BOUNDS[str(item)]["l"] - low_bound = bold(low_bound) - else: - low_bound = IXC_FULL[str(item)]["lb"] - - if "u" in BOUNDS[str(item)].keys(): - up_bound = BOUNDS[str(item)]["u"] - up_bound = bold(up_bound) - else: - up_bound = IXC_FULL[str(item)]["ub"] - - else: - low_bound = IXC_FULL[str(item)]["lb"] - up_bound = IXC_FULL[str(item)]["ub"] - - table_line( - [ - str(item), - code(item_name), - item_value, - item_description, - starting_value, - low_bound, - up_bound, - ], - ".4g", - ) - - iteration_comments(k, MODULES_IXC_NUMBERS_LIST) - - -def output_inputs(k, manual=False): - """ - Output inputs for section k - """ - - mfile_values = list() - if manual: - MODULE_INPUT_LIST = list() - inputs = DATA[k]["inputs"] - for inp in inputs: - if inp in INPUTS_LIST: - MODULE_INPUT_LIST.append(inp) - elif inp in MFILE_LIST: - if ( - MFILE.data[inp].var_flag != "OP" - and MFILE.data[inp].var_flag != "ITV" - ): - mfile_values.append(inp) - MODULE_INPUT_LIST.append(inp) - # else: - # print("The input you requested {0} is not in the input file".format(inp)) - else: - mod_names = DATA[k]["input-sections"] - MODULE_INPUT_LIST = [ - item - for item in INPUTS_LIST - if any(item in MODULES_FULL[mod_name] for mod_name in mod_names) - ] - - if len(MODULE_INPUT_LIST) == 0: - return - - heading(4, "Inputs") - output_line( - "* Values in **bold** are **default** input values. Output in MFILE but **NOT** specified in IN.DAT." 
- ) - table_heading(["Input", "Value", "Description", "Comment"]) - - for item in MODULE_INPUT_LIST: - if ( - item not in IT_VAR_LIST - and item not in EXCLUSIONS - and item not in DATA[k]["exclusions"] - ): - - if "fimp(" in item: - item_name = "fimp" - else: - item_name = item - - if item_name not in INPUTS_LIST: - item_value = str(MFILE.data[item].get_scan(-1)) - else: - if "fimp(" not in item: - item_value = INFILE.data[item].value - else: - fimp_index = int(item.split("(")[-1].split(")")[0]) - item_value = str(INFILE.data["fimp"].value[fimp_index - 1]) - - if "fimp(" not in item: - item_des = DESCRIPTIONS[item].split("\n")[0] - - if "in-" + item in COMMENTS.keys(): - comment = " ".join(COMMENTS["in-" + item]) - if comment == "": - comment = "" - else: - comment = "" - - if isinstance(item_value, list): - table_line([code(item), bold("array"), item_des], ".4g") - for i in range(len(item_value)): - if "fimp" in item: - item_des = DES_FIMP["fimp({0})".format(i + 1)] - else: - item_des = "-" - table_line( - [ - code(item) + "[{0}]".format(i), - item_value[i], - item_des, - comment, - ], - ".4g", - ) - elif "," in item_value: - table_line([code(item), bold("array"), item_des], ".4g") - item_value = item_value.split(",") - for i in range(len(item_value)): - if item_value[i] != "": - if "fimp" in item: - item_des = DES_FIMP["fimp({0})".format(i + 1)] - else: - item_des = "-" - table_line( - [ - code(item) + "[{0}]".format(i), - item_value[i], - item_des, - comment, - ], - ".4g", - ) - else: - if item_name not in INPUTS_LIST: - table_line([code(item), bold(item_value), item_des, comment], ".4g") - else: - table_line([code(item), item_value, item_des, comment], ".4g") - - -def output_outputs(k, mod_out_list=[]): - """ - Output outputs - """ - - MODULE_OUTPUTS_LIST = DATA[k]["outputs"] + mod_out_list - heading(4, "Outputs") - table_heading(["Output", "Value", "Description"]) - - for item in MODULE_OUTPUTS_LIST: - item_value = MFILE.data[item].get_scan(-1) - if item not 
in INPUTS_LIST and item not in DATA[k]["exclusions"]: - - try: - item_des = MFILE.data[item].var_description.replace("_", " ") - if item_des.lower() == item.lower(): - item_name = "" - else: - item_name = code(item) - table_line([item_name.replace(" ", "_"), item_value, item_des], ".4g") - except AttributeError: - print("Warning: Skipping item: {0}".format(item)) - pass - - -def output_output_sections(k): - """ - Output the outputs from another sections - """ - - outputs_list = list() - - for item in DATA[k]["output-sections"]: - outputs_list += DATA[item]["outputs"] - - output_outputs(k, mod_out_list=outputs_list) - - -def output_section(k): - """ - Output various items from section depending on JSON config file - """ - - if len(DATA[k]["constraints"]) != 0: - if DATA[k]["project"]: - if isinstance(DATA[k]["constraints"][0], str): - output_constraints(k) - else: - output_constraints(k, numbers=True) - else: - output_constraints(k, numbers=True) - - if len(DATA[k]["iteration-variables"]) != 0: - if DATA[k]["project"]: - if isinstance(DATA[k]["constraints"][0], str): - output_itvars(k) - else: - output_itvars(k, numbers=True) - else: - output_itvars(k, numbers=True) - - if len(DATA[k]["input-sections"]) != 0: - output_inputs(k) - - if len(DATA[k]["inputs"]) != 0: - output_inputs(k, manual=True) - - if len(DATA[k]["output-sections"]) != 0: - output_output_sections(k) - - if len(DATA[k]["outputs"]) != 0: - output_outputs(k) - - -def output_section_topper(level, section_name, value): - """ - outputs the top level information for the section - """ - - heading(level, section_name) - - if level == 1: - section_ro(section_name) - heading_name = "header-{0}".format(section_name.lower().replace(" ", "-")) - heading_comment(heading_name) - return_to_top() - if value["diagrams"] != []: - for item in value["diagrams"]: - include_diagram(item["filename"], item["caption"]) - - -def output_projects(): - """ - Output the projects - """ - - for k, v in DATA.items(): - if 
v["project"]: - output_section_topper(1, k, v) - output_section(k) - - if len(v["children"]) != 0: - for child in v["children"]: - output_section_topper(2, child, DATA[child]) - output_section(child) - - if len(DATA[child]["children"]) != 0: - for child2 in DATA[child]["children"]: - output_section_topper(3, child2, DATA[child2]) - output_section(child2) - - -def output_vertical_build(): - """ - Output for vertical build - """ - - heading(2, "Vertical Build") - table_heading(["Name", "Thickness [m]", "Height [m]", "Description"]) - - if MFILE.data["i_single_null"].get_scan(-1): - vert_build = DATA["Builds"]["Vertical Build"]["single"] - else: - vert_build = DATA["Builds"]["Vertical Build"]["double"] - - tot_height = ( - MFILE.data["hmax"].get_scan(-1) - - MFILE.data["shldlth"].get_scan(-1) - - MFILE.data["divfix"].get_scan(-1) - - MFILE.data["vgap"].get_scan(-1) - + MFILE.data["vgaptop"].get_scan(-1) - + MFILE.data["shldtth"].get_scan(-1) - + MFILE.data["fwtth"].get_scan(-1) - + MFILE.data["blnktth"].get_scan(-1) - + MFILE.data["vvblgap"].get_scan(-1) - + MFILE.data["tfcth"].get_scan(-1) - ) - - v_build_sum = tot_height - - for k, v in vert_build.items(): - - item_name = vert_build[k]["name"] - item_des = vert_build[k]["des"] - item_combined = vert_build[k]["combination"] - - if item_combined: - combo_type = vert_build[k]["combo_type"] - items = item_name.replace(" ", "").split(combo_type) - item_1_value = MFILE.data[items[0]].get_scan(-1) - item_2_value = MFILE.data[items[1]].get_scan(-1) - if combo_type == "+": - item_value = item_1_value + item_2_value - elif combo_type == "*": - item_value = item_1_value * item_2_value - else: - item_value = MFILE.data[item_name].get_scan(-1) - - if int(k) < 10: - table_line([code(item_name), item_value, v_build_sum, item_des], ".3f") - v_build_sum -= item_value - elif int(k) == 10: - table_line([code(item_name), item_value, v_build_sum, item_des], ".3f") - v_build_sum = 0 - table_line([code("Midplane"), 0.0, v_build_sum, 
"Device midplane"], ".3f") - elif int(k) > 10: - v_build_sum -= item_value - table_line([code(item_name), item_value, v_build_sum, item_des], ".3f") - - -def output_radial_build(): - """ - Output radial build - """ - - heading(2, "Radial Build") - - try: - radial_plot_file = DATA["Builds"]["radial_diagram"]["filename"] - radial_plot_caption = DATA["Builds"]["radial_diagram"]["caption"] - include_diagram(radial_plot_file, radial_plot_caption) - except Exception: - print("Cannot find image {0}".format(radial_plot_file["filename"])) - pass - - table_heading(["Name", "Thickness [m]", "Radial Position [m]", "Description"]) - - r_build_sum = 0 - rad_build = DATA["Builds"]["Radial Build"] - - for k, v in rad_build.items(): - - item_name = rad_build[k]["name"] - item_des = rad_build[k]["des"] - item_combined = rad_build[k]["combination"] - - if item_combined: - if rad_build[k]["combo_type"] == "+": - item_1 = item_name.split("+")[0].replace(" ", "") - item_2 = item_name.split("+")[1].replace(" ", "") - item_1_value = MFILE.data[item_1].get_scan(-1) - item_2_value = MFILE.data[item_2].get_scan(-1) - item_value = item_1_value + item_2_value - elif rad_build[k]["combo_type"] == "*": - item_1 = item_name.split("*")[0].replace(" ", "") - item_2 = item_name.split("*")[1].replace(" ", "") - item_1_value = MFILE.data[item_1].get_scan(-1) - item_2_value = MFILE.data[item_2].get_scan(-1) - item_value = item_1_value * item_2_value - else: - item_value = MFILE.data[item_name].get_scan(-1) - - r_build_sum += item_value - - table_line([code(item_name), item_value, r_build_sum, item_des], ".3f") - - return - - -def output_modules(): - """ - Output the modules to markdown - """ - - icc = INFILE.data["icc"].value - - for item in icc: - module = proc_dict.DICT_ICC_MODULE[str(item)] - MODULES.setdefault(module, []).append(item) - - get_indat_comments() - - output_contents() - - heading(1, "Output PDF") - include_diagram("process_diagram.png", "PROCESS output diagrams") - - 
output_section_topper(1, "Builds", DATA["Builds"]) - output_radial_build() - output_vertical_build() - - output_projects() - - OUTFILE.close() - - -def create_plots(): - """ - Create all required plots for output - """ - - if not os.path.exists("documentation/figures/"): - save_p = "" - else: - save_p = "documentation/figures/" - - # create pulse timings plot - plot_pulse_timings(MFILE, save=True, show=False, save_path=save_p) - - # create radial build plot - # TODO: non-hard coded list needed - radial_build = [ - "bore", - "ohcth", - "precomp", - "gapoh", - "tfcth", - "deltf", - "thshield_ib", - "gapds", - "d_vv_in", - "shldith", - "vvblgap", - "blnkith", - "fwith", - "scrapli", - "rminor", - ] - radial_bar_plot(radial_build, MFILE, show=False, save=True, save_path=save_p) - - # create plasma profiles plot - plasma_profiles_plot(MFILE, show=False, save=True, save_path=save_p) - - -def main(cargs): - """ - Main - """ - - create_plots() - - output_modules() - - export( - path=cargs.o + ".md", - password="e35a21bfce5462bebbecc2e43d12bf4ec2ba469d", - render_wide=True, - render_inline=True, - out_filename=cargs.o + ".html", - title="PROCESS Output", - ) - - print("Over...") - - return - - -# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -if __name__ == "__main__": - - PARSER = argparse.ArgumentParser( - description="Create PROCESS output document." 
- "For info contact james.morris2@ukaea.uk" - ) - - PARSER.add_argument( - "-f", metavar="MFILENAME", type=str, default="", help="specify PROCESS MFILE" - ) - - PARSER.add_argument( - "-j", - metavar="JSONFILE", - type=str, - default="output_detailed.json", - help="specify JSON file location and name", - ) - - PARSER.add_argument( - "-o", - metavar="OUTFILENAME", - type=str, - default="output_detailed", - help="specify output file", - ) - - COMMAND_ARGS = PARSER.parse_args() - - # read json - try: - json_filename = COMMAND_ARGS.j - DATA = json.load(open(json_filename), object_pairs_hook=OrderedDict) - except ValueError as e: - print("Error in JSON config file. Please correct error: {0}".format(e)) - sys.exit() - except FileNotFoundError: - print( - "Cannot find JSON config file. Expecting file in: {0}".format( - COMMAND_ARGS.j - ) - ) - sys.exit() - - if COMMAND_ARGS.f: - - # read mfile - MFILE = MFile(filename=COMMAND_ARGS.f) - MFILE_LIST = MFILE.data.keys() - - # read input file - INFILE = InDat(filename=COMMAND_ARGS.f, start_line=MFILE.mfile_end) - INPUTS_LIST = INFILE.data.keys() - - # initialise COMMENTS dictionary - COMMENTS = dict() - - CONSTRAINTS = INFILE.data["icc"].value - IT_VARS = INFILE.data["ixc"].value - IT_VAR_LIST = [IXC_FULL[str(itvar)]["name"] for itvar in IT_VARS] - BOUNDS = INFILE.data["bounds"].value - IT_VAR_VALUES = get_itvar_values() - - OUTFILE = open(COMMAND_ARGS.o + ".md", "w") - - # caption numbering - CAPTIONNUMBER = 1 - - main(COMMAND_ARGS) - - else: - print("Please enter a reference MFILE with -f!") diff --git a/utilities/output_summary.py b/utilities/output_summary.py deleted file mode 100644 index 81596dfb..00000000 --- a/utilities/output_summary.py +++ /dev/null @@ -1,795 +0,0 @@ -""" - - Create PROCESS output document - - James Morris 11/05/2017 - CCFE - -""" - -# Third party libraries -import sys -import json -import argparse -from collections import OrderedDict -from grip import export - -# PROCESS libraries -from 
process.io.in_dat import InDat -from process.io.mfile import MFile -from dict_tools import proc_dict - -# Constants -MODULES = dict() -ICC_FULL = proc_dict.DICT_ICC_FULL -ICC_VARS = proc_dict.DICT_ICC_VARS -IXC_FULL = proc_dict.DICT_IXC_FULL -IXC_DEF = proc_dict.DICT_IXC_DEFAULT -DES_FIMP = proc_dict.DICT_FIMP -MODULES_FULL = proc_dict.DICT_MODULE -DEFAULTS = proc_dict.DICT_DEFAULT -DESCRIPTIONS = proc_dict.DICT_DESCRIPTIONS - -# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -# Markdown - - -def bold(x): - return "**" + str(x) + "**" - - -def unbold(x): - return x.replace("*", "") - - -def code(x): - return "`" + str(x) + "`" - - -def bold_italics(x): - return "**_" + str(x) + "_**" - - -def output_line(x): - OUTFILE.write(x + "\n\n") - - -def heading(x, y): - OUTFILE.write("{0} {1}\n\n".format("#" * x, y)) - - -def output_below_table_comment(x, y, z): - OUTFILE.write("> **{0}** : **{1}** : {2}\n\n".format(x, y, z)) - - -def output_newline(): - OUTFILE.write("\n\n") - - -def heading_comment(x): - if x in COMMENTS: - OUTFILE.write("> {0}\n\n".format(" ".join(COMMENTS[x]))) - - -def return_to_top(): - OUTFILE.write("[:arrow_up:](#contents)\n\n") - - -def output_png(x, y, z): - OUTFILE.write("![{0}]({1} '{2}')\n\n".format(x, y, z)) - - -def bold_info(x, y): - OUTFILE.write("{0}: {1}\n\n".format(bold(x), y)) - - -def content_heading(x): - tag = x.lower().replace(" ", "-").replace(":", "") - x = x.replace("-", " ") - OUTFILE.write("[{0}](#{1})\n\n".format(x, tag)) - - -def content_subheading(x): - tag = x.lower().replace(" ", "-").replace(":", "") - OUTFILE.write("    [{0}](#{1})\n\n".format(x, tag)) - - -def table_heading(headings): - header_line = "|" - under_line = "|" - for item in headings: - header_line += " {0} |".format(item) - under_line += " --- |" - OUTFILE.write(header_line + "\n") - OUTFILE.write(under_line + "\n") - - -def table_line(items, form): - item_line = "|" - item_format = "{0:" + "{0}".format(form) + "}" - for 
item in items: - if isinstance(item, float): - item_line += item_format.format(item) + " |" - elif isinstance(item, int): - item_line += " {0} |".format(item) - else: - try: - eval(item) - item_line += item_format.format(item) + " |" - # item_line += " {0:.4g} |".format(item) - except (NameError, ValueError, SyntaxError, TypeError): - item_line += " {0} |".format(item) - OUTFILE.write(item_line + "\n") - - -# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - -def check_empty(key): - """ - Check not an empty section - """ - con_mods = DATA["projects"][key]["icc_modules"] - mod_names = DATA["projects"][key]["in_dat_modules"] - tot_count = 0 - con_count = 0 - it_count = 0 - in_count = 0 - out_count = 0 - result = dict() - - CONSTRAINTS = INFILE.data["icc"].value - IT_VARS = INFILE.data["ixc"].value - IT_VAR_LIST = [IXC_FULL[str(itvar)]["name"] for itvar in IT_VARS] - MODULE_VAR_LIST = sum([MODULES_FULL[mod_name] for mod_name in mod_names], []) - MODULE_ICC_LIST = sum( - [MODULES[key] for key in MODULES.keys() if key in con_mods], [] - ) - - for item in CONSTRAINTS: - if item in MODULE_ICC_LIST: - tot_count += 1 - con_count += 1 - - for item in IT_VARS: - item_name = IXC_FULL[str(item)]["name"] - if item_name in MODULE_VAR_LIST: - tot_count += 1 - it_count += 1 - - for item in INPUTS_LIST: - if item in MODULE_VAR_LIST: - if item not in IT_VAR_LIST: - tot_count += 1 - in_count += 1 - - for item in MFILE_LIST: - if item in MODULE_VAR_LIST: - if item not in INPUTS_LIST: - tot_count += 1 - out_count += 1 - - result["tot_count"] = tot_count - result["con_count"] = con_count - result["it_count"] = it_count - result["in_count"] = in_count - result["out_count"] = out_count - - return result - - -def get_itvar_values(): - """ - Get iteration variable values from MFILE - """ - itvar_vals = dict() - - for item in MFILE_LIST: - if item[:5] == "itvar": - name = MFILE.data[item].var_description - value = MFILE.data[item].get_scan(-1) - 
itvar_vals[name] = value - return itvar_vals - - -def get_indat_comments(): - """ - Get IN.DAT #tag comments - """ - lines = open(COMMAND_ARGS.f).readlines() - - for line in lines: - if line[:2] == "*#": - split_line = line.split(":") - key = split_line[0][2:].replace(" ", "") - value = split_line[1].strip(" ") - COMMENTS.setdefault(key, []).append(value) - - -def output_contents(): - """ - Print contents page - """ - - bold_info("Username", MFILE.data["username"].get_scan(-1)) - bold_info("Date", MFILE.data["date"].get_scan(-1)) - bold_info("Time", MFILE.data["time"].get_scan(-1)) - bold_info("PROCESS version", MFILE.data["procver"].get_scan(-1)) - bold_info("Run description", MFILE.data["runtitle"].get_scan(-1)) - - # Top level comment - output_line(bold("IN.DAT Comment")) - heading_comment("header-title") - - heading(1, "Contents") - - content_heading("Diagrams") - - for k, v in DATA["projects"].items(): - - result = check_empty(k) - - if result["tot_count"] != 0: - - content_heading(k) - - if k == "General": - content_subheading("Radial Build") - content_subheading("Vertical Build") - - # for item in v["mfile_sections"]: - # content_subheading(item) - - -def constraint_comments(ky, iccs): - """ - Display constraint comments for this module - """ - - for icc in iccs: - con_key = "constraint-{0}".format(icc) - if con_key in COMMENTS.keys(): - con_name = ICC_FULL[str(icc)]["name"] - comment = " ".join(COMMENTS[con_key]) - output_below_table_comment(icc, con_name, comment) - - -def output_constraints(k): - """ - Output constraints for section k - """ - - con_mods = DATA["projects"][k]["icc_modules"] - MODULE_ICC_LIST = sum( - [MODULES[key] for key in MODULES.keys() if key in con_mods], [] - ) - - heading(3, "Constraints") - - table_heading( - [ - "Constraint", - "Description", - "F-Value Name", - "F-Value Value", - "Limit Name", - "Limit", - "Value", - ] - ) - - for item in CONSTRAINTS: - if item in MODULE_ICC_LIST: - - con_name = ICC_FULL[str(item)]["name"] - - if 
ICC_VARS[str(item)] != "consistency" and ICC_VARS[str(item)] != "empty": - if "f" in ICC_VARS[str(item)].keys(): - con_f_val_name = ICC_VARS[str(item)]["f"] - if con_f_val_name not in MFILE_LIST: - if con_f_val_name in IT_VAR_VALUES.keys(): - con_f_val = IT_VAR_VALUES[con_f_val_name] - else: - if con_f_val_name in INPUTS_LIST: - con_f_val = INFILE.data[con_f_val_name].value - else: - con_f_val = IXC_DEF[con_f_val_name] - con_f_val = bold(con_f_val) - else: - con_f_val = MFILE.data[con_f_val_name].get_scan(-1) - - con_lim_name = ICC_VARS[str(item)]["v"] - - if con_lim_name in MFILE.data.keys(): - con_lim = MFILE.data[con_lim_name].get_scan(-1) - else: - if con_lim_name in DEFAULTS.keys(): - con_lim = DEFAULTS[con_lim_name] - else: - con_lim = bold("calculated") - else: - con_f_val_name = "-" - con_lim_name = "consistency" - con_f_val = "-" - con_lim = "-" - - if con_f_val != "-" and con_lim != "-" and con_lim != bold("calculated"): - if isinstance(str): - con_lim = unbold(con_lim) - if isinstance(con_f_val, str): - con_f_val = unbold(con_f_val) - actual_value = "{0:.2e}".format(float(con_lim) * float(con_f_val)) - con_lim = "{0:.2e}".format(float(con_lim)) - con_f_val = "{0:.2e}".format(float(con_f_val)) - else: - actual_value = "-" - - # if item in MODULES[con_mod]: - table_line( - [ - str(item), - con_name, - con_f_val_name, - con_f_val, - con_lim_name, - con_lim, - actual_value, - ], - ".4g", - ) - - constraint_comments(k, MODULE_ICC_LIST) - - -def iteration_comments(ky, ixcs): - """ - Display iteration comments for this module - """ - - for ixc in ixcs: - itv_key = "iteration-variable-{0}".format(ixc) - if itv_key in COMMENTS.keys(): - itv_name = IXC_FULL[str(ixc)]["name"] - comment = " ".join(COMMENTS[itv_key]) - if len(comment) > 1: - output_below_table_comment(ixc, itv_name, comment) - - -def output_itvars(k): - """ - Output iteration variables for section k - """ - - mod_names = DATA["projects"][k]["in_dat_modules"] - MODULES_IXC_NAMES_LIST = [ - 
IXC_FULL[str(ixc)]["name"] - for ixc in IT_VARS - if any( - IXC_FULL[str(ixc)]["name"] in MODULES_FULL[mod_name] - for mod_name in mod_names - ) - ] - - MODULES_IXC_NUMBERS_LIST = [ - ixc - for ixc in IT_VARS - if any( - IXC_FULL[str(ixc)]["name"] in MODULES_FULL[mod_name] - for mod_name in mod_names - ) - ] - - heading(3, "Iteration Variables") - output_line("* Values in **bold** are **not default** but user inputs.") - table_heading( - [ - "No.", - "Name", - "Final Value", - "Description", - "Starting Value", - "Lower Bound", - "Upper Bound", - ] - ) - - for item in IT_VARS: - - item_name = IXC_FULL[str(item)]["name"] - if "fimp(" in item_name: - item_description = DES_FIMP[item_name] - else: - item_description = DESCRIPTIONS[item_name].split("\n")[0] - - if item_name in IT_VAR_VALUES.keys(): - item_value = IT_VAR_VALUES[item_name] - else: - item_value = MFILE.data[item_name].get_scan(-1) - - if item_name in INPUTS_LIST: - starting_value = INFILE.data[item_name].value - starting_value = bold(starting_value) - else: - if "fimp(" in item_name: - starting_value = item_value - else: - starting_value = IXC_DEF[item_name] - - if item_name in MODULES_IXC_NAMES_LIST: - - if str(item) in BOUNDS.keys(): - if "l" in BOUNDS[str(item)].keys(): - low_bound = BOUNDS[str(item)]["l"] - low_bound = bold(low_bound) - else: - low_bound = IXC_FULL[str(item)]["lb"] - - if "u" in BOUNDS[str(item)].keys(): - up_bound = BOUNDS[str(item)]["u"] - up_bound = bold(up_bound) - else: - up_bound = IXC_FULL[str(item)]["ub"] - - else: - low_bound = IXC_FULL[str(item)]["lb"] - up_bound = IXC_FULL[str(item)]["ub"] - - table_line( - [ - str(item), - code(item_name), - item_value, - item_description, - starting_value, - low_bound, - up_bound, - ], - ".4g", - ) - - iteration_comments(k, MODULES_IXC_NUMBERS_LIST) - - -def output_inputs(k): - """ - Output inputs for section k - """ - - mod_names = DATA["projects"][k]["in_dat_modules"] - MODULE_INPUT_LIST = [ - item - for item in INPUTS_LIST - if 
any(item in MODULES_FULL[mod_name] for mod_name in mod_names) - ] - - heading(3, "Inputs") - table_heading(["Input", "Value", "Description", "Comment"]) - - for item in INPUTS_LIST: - if item in MODULE_INPUT_LIST: - if item not in IT_VAR_LIST and item not in DATA["exclusions"]["inputs"]: - - item_value = INFILE.data[item].value - item_des = DESCRIPTIONS[item].split("\n")[0] - - if "in-" + item in COMMENTS.keys(): - comment = COMMENTS["in-" + item][0].replace("\n", "") - if comment == "": - comment = "" - else: - comment = "" - - if "fimp(" in item: - i_des = DES_FIMP[item] - - if isinstance(item_value, list): - table_line([code(item), bold("array"), item_des], ".4g") - for i in range(len(item_value)): - if "fimp(" in item: - item_des = i_des[i] - else: - item_des = "-" - table_line( - [ - code(item) + "[{0}]".format(i), - item_value[i], - item_des[4:], - comment, - ], - ".4g", - ) - elif "," in item_value: - table_line([code(item), bold("array"), item_des], ".4g") - item_value = item_value.split(",") - for i in range(len(item_value)): - if item_value[i] != "": - table_line( - [ - code(item) + "[{0}]".format(i), - item_value[i], - "-", - comment, - ], - ".4g", - ) - else: - table_line([code(item), item_value, item_des, comment], ".4g") - - -def output_outputs(k): - """ - Output outputs for section k - """ - - mod_names = DATA["projects"][k]["mfile_sections"] - MODULE_OUTPUTS_LIST = [ - item - for item in MFILE_LIST - if any(MFILE.data[item].var_mod == mod_name for mod_name in mod_names) - ] - - heading(3, "Outputs") - table_heading(["Output", "Value", "Description"]) - - for item in MFILE_LIST: - if item in MODULE_OUTPUTS_LIST: - - item_value = MFILE.data[item].get_scan(-1) - if item not in INPUTS_LIST: - - try: - item_des = MFILE.data[item].var_description.replace("_", " ") - if item_des.lower() == item.lower(): - item_name = "" - else: - item_name = code(item) - table_line( - [item_name.replace(" ", "_"), item_value, item_des], ".4g" - ) - except AttributeError: - 
print("Warning: Skipping item: {0}".format(item)) - pass - - -def vertical_build(): - """ - Output for vertical build - """ - - heading(2, "Vertical Build") - table_heading(["Name", "Thickness [m]", "Height [m]", "Description"]) - - if MFILE.data["i_single_null"].get_scan(-1): - vert_build = DATA["machine build"]["vertical"]["single"] - else: - vert_build = DATA["machine build"]["vertical"]["double"] - - tot_height = ( - MFILE.data["hmax"].get_scan(-1) - - MFILE.data["shldlth"].get_scan(-1) - - MFILE.data["divfix"].get_scan(-1) - - MFILE.data["vgap"].get_scan(-1) - + MFILE.data["vgaptop"].get_scan(-1) - + MFILE.data["shldtth"].get_scan(-1) - + MFILE.data["fwtth"].get_scan(-1) - + MFILE.data["blnktth"].get_scan(-1) - + MFILE.data["vvblgap"].get_scan(-1) - + MFILE.data["tfcth"].get_scan(-1) - ) - - v_build_sum = tot_height - - for k, v in vert_build.items(): - - item_name = vert_build[k]["name"] - item_des = vert_build[k]["des"] - item_combined = vert_build[k]["combination"] - - if item_combined: - combo_type = vert_build[k]["combo_type"] - items = item_name.replace(" ", "").split(combo_type) - item_1_value = MFILE.data[items[0]].get_scan(-1) - item_2_value = MFILE.data[items[1]].get_scan(-1) - if combo_type == "+": - item_value = item_1_value + item_2_value - elif combo_type == "*": - item_value = item_1_value * item_2_value - else: - item_value = MFILE.data[item_name].get_scan(-1) - - if int(k) < 10: - table_line([code(item_name), item_value, v_build_sum, item_des], ".3f") - v_build_sum -= item_value - elif int(k) == 10: - table_line([code(item_name), item_value, v_build_sum, item_des], ".3f") - v_build_sum = 0 - table_line([code("Midplane"), 0.0, v_build_sum, "Device midplane"], ".3f") - elif int(k) > 10: - v_build_sum -= item_value - table_line([code(item_name), item_value, v_build_sum, item_des], ".3f") - - -def radial_build(): - """ - Output the machine radial build - """ - - heading(2, "Radial Build") - table_heading(["Name", "Thickness [m]", "Radial Position 
[m]", "Description"]) - - r_build_sum = 0 - rad_build = DATA["machine build"]["radial"] - - for k, v in rad_build.items(): - - item_name = rad_build[k]["name"] - item_des = rad_build[k]["des"] - item_combined = rad_build[k]["combination"] - - if item_combined: - if rad_build[k]["combo_type"] == "+": - item_1 = item_name.split("+")[0].replace(" ", "") - item_2 = item_name.split("+")[1].replace(" ", "") - item_1_value = MFILE.data[item_1].get_scan(-1) - item_2_value = MFILE.data[item_2].get_scan(-1) - item_value = item_1_value + item_2_value - elif rad_build[k]["combo_type"] == "*": - item_1 = item_name.split("*")[0].replace(" ", "") - item_2 = item_name.split("*")[1].replace(" ", "") - item_1_value = MFILE.data[item_1].get_scan(-1) - item_2_value = MFILE.data[item_2].get_scan(-1) - item_value = item_1_value * item_2_value - else: - item_value = MFILE.data[item_name].get_scan(-1) - - r_build_sum += item_value - - table_line([code(item_name), item_value, r_build_sum, item_des], ".3f") - - return - - -def output_project(key, result): - """ - Output sections - """ - - heading(1, key) - return_to_top() - heading_comment("header-{0}".format(key)) - - if key == "General": - radial_build() - vertical_build() - - # Constraints - if result["con_count"] != 0: - output_constraints(key) - - if result["it_count"] != 0: - output_itvars(key) - - if result["in_count"] != 0: - output_inputs(key) - - if result["out_count"] != 0: - output_outputs(key) - - -def output_diagrams(): - """ - Embed plot_proc output - """ - - heading(1, "Diagrams") - try: - output_png("alt text", "process_diagram.png", "PROCESS output diagrams") - except Exception: - pass - - -def output_modules(): - """ - Output the modules to markdown - """ - - icc = INFILE.data["icc"].value - - for item in icc: - module = proc_dict.DICT_ICC_MODULE[str(item)] - MODULES.setdefault(module, []).append(item) - - get_indat_comments() - - output_contents() - - output_diagrams() - - for k, v in DATA["projects"].items(): - - result 
= check_empty(k) - - if result["tot_count"] != 0: - - output_project(k, result) - - OUTFILE.close() - - -def main(cargs): - """ - Main - """ - - output_modules() - - export( - path="output_summary.md", - password="e35a21bfce5462bebbecc2e43d12bf4ec2ba469d", - render_wide=True, - render_inline=True, - out_filename=cargs.o + ".html", - title="PROCESS Output", - ) - - print("Over...") - - return - - -# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - -if __name__ == "__main__": - - PARSER = argparse.ArgumentParser( - description="Create PROCESS output document." - "For info contact james.morris2@ukaea.uk" - ) - - PARSER.add_argument( - "-f", metavar="MFILENAME", type=str, default="", help="specify PROCESS MFILE" - ) - - PARSER.add_argument( - "-o", - metavar="OUTFILENAME", - type=str, - default="output_summary", - help="specify output file", - ) - - COMMAND_ARGS = PARSER.parse_args() - - # read json - try: - DATA = json.load(open("output_summary.json"), object_pairs_hook=OrderedDict) - except ValueError as e: - print("Error in JSON config file. 
Please correct error: {0}".format(e)) - sys.exit() - - if COMMAND_ARGS.f: - - # read mfile - MFILE = MFile(filename=COMMAND_ARGS.f) - MFILE_LIST = MFILE.data.keys() - - # read input file - INFILE = InDat(filename=COMMAND_ARGS.f, start_line=MFILE.mfile_end) - INPUTS_LIST = INFILE.data.keys() - - # initialise COMMENTS dictionary - COMMENTS = dict() - - CONSTRAINTS = INFILE.data["icc"].value - IT_VARS = INFILE.data["ixc"].value - IT_VAR_LIST = [IXC_FULL[str(itvar)]["name"] for itvar in IT_VARS] - BOUNDS = INFILE.data["bounds"].value - IT_VAR_VALUES = get_itvar_values() - - OUTFILE = open(COMMAND_ARGS.o + ".md", "w") - - main(COMMAND_ARGS) - - else: - print("Please enter a reference MFILE with -f!") diff --git a/utilities/plot_comparison.py b/utilities/plot_comparison.py deleted file mode 100755 index a39cb1b2..00000000 --- a/utilities/plot_comparison.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python -""" -Code to display the evolution of two variables in a selection of MFILES - -Author: H. Lux (Hanni.Lux@ukaea.uk) - -Input files: -List of MFILE.DATs -""" - - -####################### -# imported libraries - -import argparse -from matplotlib import rc -from pylab import figure, xlabel, ylabel, plot, show, savefig -import process.io.mfile as mf - -rc("font", size=25) -rc("lines", lw=1.5) # is overwritten by setting coulours/linestylies -rc(("xtick", "ytick"), labelsize=20) -rc("figure", figsize=(8, 6)) -rc("figure.subplot", bottom=0.18) # was 0.12 -rc("figure.subplot", left=0.19) -rc("figure.subplot", right=0.81) - - -if __name__ == "__main__": - - ########################################################### - # Usage - - PARSER = argparse.ArgumentParser( - description="Program to display\ - the evolution of two variables in a selection of MFILEs." 
- ) - - PARSER.add_argument( - "-x", "--xaxis", default="rmajor", help="x-axis, default=rmajor" - ) - - PARSER.add_argument( - "-y", "--yaxis", default="powfmw", help="y-axis, default=powfmw" - ) - - PARSER.add_argument( - "mfiles", - metavar="f", - type=str, - default="MFILE.DAT", - nargs="*", - help="list of MFiles to be plotted; \ - default = MFILE.DAT", - ) - - PARSER.add_argument( - "-e", - "--end", - default="pdf", - help="file format default =\ - pdf", - ) - - ARGS = PARSER.parse_args() - - XARR = [] - YARR = [] - - for mfile in ARGS.mfiles: - - m_file = mf.MFile(mfile) - scan = -1 - - XARR += [m_file.data[ARGS.xaxis.lower()].get_scan(scan)] - YARR += [m_file.data[ARGS.yaxis.lower()].get_scan(scan)] - - figure() - xlabel(ARGS.xaxis) - ylabel(ARGS.yaxis) - plot(XARR, YARR, "ks") - savefig( - "Comparison_" - + ARGS.xaxis.replace("/", "_") - + "_" - + ARGS.yaxis.replace("/", "_") - + "." - + ARGS.end - ) - - show() diff --git a/utilities/plot_mfile_sweep.py b/utilities/plot_mfile_sweep.py deleted file mode 100755 index 16791972..00000000 --- a/utilities/plot_mfile_sweep.py +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/env python - -""" - - Plots MFILE.DAT data. - - James Morris - CCFE - - Notes: - + 30/04/2014: Initial version created - - Compatible with PROCESS version 274 -""" - -import argparse -import matplotlib.pyplot as plt -import process.io.mfile as mf - - -def plot_scan(cargs, m_file_data): - """Function to plot chosen variables. 
- - Args: - command arguments --> arguments given at command line - - """ - - nscans = int(m_file_data.data["isweep"].get_scan(-1)) - x = range(1, nscans + 1) - data = {} - for param in cargs.p: - values = m_file_data.data[param].get_scans()[:nscans] - if values[0] != 0.0: - values = [val / values[0] for val in values] - if param not in data.keys(): - data[param] = values - - fig = plt.figure(figsize=(12, 9), dpi=100) - ax = fig.add_subplot(111) - - for key in data.keys(): - ax.plot(x, data[key], label=key) - ax.legend(loc="upper left") - ax.set_xlabel("Scan Number") - ax.set_ylabel("Parameters (Normalised to 1st scan)") - if cargs.show: - plt.show() - else: - plt.savefig(cargs.o, orientation="landscape") - - -if __name__ == "__main__": - - # Setup command line arguments - parser = argparse.ArgumentParser(description="Plot sweep values from " "MFILE.DAT") - - parser.add_argument( - "-f", - metavar="f", - type=str, - default="MFILE.DAT", - help="File to read as MFILE.DAT", - ) - - parser.add_argument( - "-o", - metavar="o", - type=str, - default="sweep_fig.pdf", - help="File to save plot to", - ) - - parser.add_argument( - "-p", - metavar="p", - type=str, - nargs="+", - help="Variables for the plot (e.g. -p rmajor bt te ..." ")", - ) - - parser.add_argument( - "--show", - help="Show plot to screen instead of saving " "to file", - action="store_true", - ) - - args = parser.parse_args() - - # read mfile - mfile_data = mf.MFile(args.f) - - # plot requested parameters all normalised to the first scan - plot_scan(args, mfile_data) diff --git a/utilities/plot_opti_process.py b/utilities/plot_opti_process.py deleted file mode 100644 index 0bf9f127..00000000 --- a/utilities/plot_opti_process.py +++ /dev/null @@ -1,687 +0,0 @@ -#!/usr/bin/env python -""" Set of macro that plots information about the VMNCON optimization - WARNING : YOU HAVE TO CREATE THE PROCESS DICT IN ORDER TO USE THIS MACRO !! 
- - - PART 1 : The file OPT.DAT is read - _________________________________ - This file contains : - - The PROCESS id numers of - 1) The constraints : constraints_indexes - 2) The variables : variables_indexes - - The evolution with each VMCON interation of - Description | variable name in vmcon | variables name in the macro - 1) The figure of merit | abs(obj) | figures_of_merit [] - 2) The VMCON convergence criteria | sum | vmcon_convergence [] - 3) The squared sum of the constraints residuals | sqsumsq | constraints_quad_sum [] - 4) The residuals of each constraints | conf[] | constraints [[]] - 5) The variables values normalized to first the interation one | x[] | variables [[]] - _________________________________ - - - PART 2 : Plot - _____________ - The pyplot routines - - Plot 1 : Figure of merit plot - VMCON index evolution of the figure of merit - - Plot 2 : Convergence plot : - VMCON index evolution of - - The VMCON convergence parameter - - The quadratic sum of the constraints residuals - - The maximum between the VMCON convergence parameter and the constraints residual quadratic sum - This quantity is the one USED to decide that optimizer has converged or not - - Plot 3 : Dominant constraints plots : - The last VMCON iteration is used order to rank the constraints with their residual values and to plot the - VMCON index evolution of - - The n_ploted_const dominant constraints values (n_ploted_const is used defined) - - The quadratic sum of the dominant constraints - - The total quadratic sum of the constraints (sqsumsq) - The difference of the two quadratic sums allow to check of any other variables contribute to the constaints for any step of the optimization - - Plot 4 : Selected constraint plot : - VMCON index evolution of - - A constraint selected by its PROCESS number defined in vardes - - The total quadratic sum of the constraints (sqsumsq) - Help to focus on a given constraint ... 
- - Plot 5 : Major variable evolution - The variation amplitude of the optimization varaibles (max(var) - min(var)) is used to rank the variables and to plot the - VMCON index evolution of the n_var_plots dominant variables (n_var_plots is used defined) - - Plot 6 : Selected vaiable pair trajectory plot - The x and y variables are selected using the PROCESS variable index defined in vardes - The z axis also shows ln( max( sum, sqsumsq ) ) - - The plots are saved in : - - a used defined format (it has to be supported by pyplot) - - a folder called OPT_plots folder - - The output file naming convention still has to be discussed - _____________ - """ -import matplotlib -import os -import argparse -from argparse import RawTextHelpFormatter -import numpy as np -import matplotlib.pyplot as plt -from create_dicts import get_dicts - -matplotlib.use("Agg") - - -# Load dicts from dicts JSON file -p_dicts = get_dicts() - - -if __name__ == "__main__": - - # PARSING USER PARAMETERS - # please execute 'python plot_opti_process.py -h' for input information - # Option definition - # ----------------- - parser = argparse.ArgumentParser( - description="Plot optimization information", - formatter_class=RawTextHelpFormatter, - ) - parser.add_argument( - "-p", - "--plot_selec", - nargs="?", - default="all", - help="Plot selection string :\n - If it containts 'FoM' -> Figure of Merit plot \n - If it containts 'conv' -> convergence criteria plot \n - If it containts 'domconst' -> dominant constraint plot\n - If it containts 'allconst' -> a plot for each constraint stored in All_Const/\n - If it containts 'domvar' -> dominant variables plot \n - If it containts 'allvar' -> a plot for each variable in All_Var/\n - If it containts 'all' -> all the mentionned plots (default value)", - ) - parser.add_argument( - "-ndc", - "--n_dom_const", - nargs="?", - default=3, - help="number of plotted dominant constaints (default=3)", - type=int, - ) - parser.add_argument( - "-ndv", - "--n_dom_var", - 
nargs="?", - default=4, - help="number of plotted dominant variables (default=4)", - type=int, - ) - parser.add_argument( - "-ic", - "--i_const", - nargs="?", - default=-1, - help="Selection of the constraint to be plotted (PROCESS number defined in vardes, default=-1)", - type=int, - ) - parser.add_argument( - "-ixv", - "--i_X_var", - nargs="?", - default=-1, - help="X variable on pair plot selection (PROCESS number defined in vardes, default=-1)", - type=int, - ) - parser.add_argument( - "-iyv", - "--i_Y_var", - nargs="?", - default=-1, - help="Y variable on pair plot selection (PROCESS number defined in vardes, default=-1)", - type=int, - ) - parser.add_argument( - "-sf", - "--save_format", - nargs="?", - default="pdf", - help="output format (default='pdf') ", - ) - parser.add_argument( - "-as", - "--axis_font_size", - nargs="?", - default=18, - help="Axis label font size selection (default=18)", - type=int, - ) - - # Option argument extraction - # -------------------------- - args = parser.parse_args() - plot_selection = str(args.plot_selec) - n_ploted_const = int(args.n_dom_const) - n_var_plots = int(args.n_dom_var) - constraint_selection = int(args.i_const) - x_variable_selection = int(args.i_X_var) - y_variable_selection = int(args.i_Y_var) - save_format = str(args.save_format) - - # Hardcoded input : Plot option selection (color and line style) - plot_opt = [ - "r-", - "b-", - "g-", - "m-", - "r--", - "b--", - "g--", - "m--", - "r:", - "b:", - "g:", - "m:", - ] - - # Boolean swiches for plot selection - # ----------------------------------- - plot_FoM = ("FoM" in plot_selection) or "all" == plot_selection - plot_conv = ("conv" in plot_selection) or "all" == plot_selection - plot_const_dom = ("domconst" in plot_selection) or "all" == plot_selection - plot_var_dom = ("domvar" in plot_selection) or "all" == plot_selection - plot_var_all = ("allvar" in plot_selection) or "all" == plot_selection - plot_const_all = ("allconst" in plot_selection) or "all" == 
plot_selection - - plot_const_spe = int(-1) != constraint_selection - plot_var_spe_pair = ( - int(-1) != x_variable_selection and int(-1) != y_variable_selection - ) - axis_font_size = int(args.axis_font_size) - ##################################################### - - # Step 1 : Data extraction - # ---------------------------------------------------------------------------------------------- - plot_indexes = list() - vmcon_indexes = list() - figures_of_merit = list() - vmcon_convergence = list() - constraints_quad_sum = list() - convergence_parameter = list() - constraints = list() - variables = list() - gradients = list() - - # Opening the pandora box - with open("../bin/OPT.DAT", "r") as opt_data: - opt_data_lines = opt_data.readlines() - opt_data_lines = [ - line.strip("\n") for line in opt_data_lines - ] # Just removing the \n statment - - # Optimization setup information extraction - # ------------------------------------------ - # Constrains - n_constraints = int(opt_data_lines[1]) - constraints_indexes = [ - int(constraints_str) - for constraints_str in opt_data_lines[4] - .replace(" ", " ") - .replace(" ", " ")[1:] - .split(" ") - ] - - # Variables - n_variables = int(opt_data_lines[7]) - variables_indexes = [ - int(constraints_str) - for constraints_str in opt_data_lines[10] - .replace(" ", " ") - .replace(" ", " ")[1:] - .split(" ") - ] - - # Number of VMCON iterations - n_vmcon = int(len(opt_data_lines) - 15) - - # Plot indexes - plot_indexes = [ii + 1 for ii in range(n_vmcon)] - # ------------------------------------------ - - # VMCON data extraction - # ---------------------- - # Full data extraction - data = list() - data_str = list() - for ii_vmcon in range(0, n_vmcon): - data_str.append(opt_data_lines[15 + ii_vmcon].split(" ")) - ii = int(0) - while ii != len(data_str[ii_vmcon]): - if data_str[ii_vmcon][ii] == "": - data_str[ii_vmcon].pop(ii) - else: - ii += 1 - - data.append(list()) - for data_ii in data_str[ii_vmcon]: - 
data[ii_vmcon].append(float(data_ii)) - - # VMCON global indexes - for ii_vmcon in range(0, n_vmcon): - vmcon_indexes.append(int(data[ii_vmcon][0])) - figures_of_merit.append(float(data[ii_vmcon][1])) - vmcon_convergence.append(float(data[ii_vmcon][2])) - constraints_quad_sum.append(float(data[ii_vmcon][3])) - convergence_parameter.append( - max(vmcon_convergence[ii_vmcon], constraints_quad_sum[ii_vmcon]) - ) - - # Constrains - for ii_constraint in range(0, n_constraints): - constraints.append(list()) - for ii_vmcon in range(0, n_vmcon): - constraints[ii_constraint].append( - abs(float(data[ii_vmcon][ii_constraint + 4])) - ) - - # Variables - for ii_variables in range(0, n_variables): - variables.append(list()) - gradients.append(list()) - for ii_vmcon in range(0, n_vmcon): - variables[ii_variables].append( - float(data[ii_vmcon][4 + n_constraints + ii_variables]) - ) - gradients[ii_variables].append( - float( - data[ii_vmcon][4 + n_constraints + n_variables + ii_variables] - ) - ) - - variables = variables[:-1] - gradients = gradients[:-1] - constraints = constraints[:-1] - # --------------------- - - # Step 2 : Plotting - # ---------------------------------------------------------------------------------------------- - tolerance = float(1e-7) - if not os.path.isdir("OPT_plots"): - os.mkdir("OPT_plots") - - # PLOT 1 : Figure of merit evolution - # ----------------------------------- - if plot_FoM: - - # Plot - plt.plot(plot_indexes, figures_of_merit, "g-") - - # Range - max_FoM = max(figures_of_merit) - min_FoM = min(figures_of_merit) - y_range_FoM = max_FoM - min_FoM - y_min = min_FoM - 0.1 * y_range_FoM - y_max = max_FoM + 0.1 * y_range_FoM - x_min = 0 - x_max = n_vmcon + 2 - - # Cosmetics - plt.xlabel("$VMCON$ iteration", fontsize=axis_font_size) - plt.ylabel("Figure of merit", fontsize=axis_font_size) - plt.axis([x_min, x_max, y_min, y_max]) - plt.grid(True) - plt.tight_layout() - plt.savefig("OPT_plots/FoM_evolution." 
+ save_format, format=save_format) - plt.close() - # ----------------------------------- - - # Plot 2 : Convergence parameters - # -------------------------------- - if plot_conv: - - # Ranges - x_min = 0 - x_max = n_vmcon + 2 - y_min = 1e-8 - y_max = 1.0 - - # Plot - plt.plot(plot_indexes, vmcon_convergence, "b-", label="VMCON conv") - plt.plot(plot_indexes, constraints_quad_sum, "c-", label="const quad sum") - plt.plot(plot_indexes, convergence_parameter, "k:", label="final conv ") - plt.plot([x_min, x_max], [tolerance, tolerance], "k-") - - # Cosmetics - plt.legend(loc="best") - plt.xlabel("$VMCON$ iteration", fontsize=axis_font_size) - plt.ylabel("Criteria", fontsize=axis_font_size) - plt.yscale("log") - plt.axis([x_min, x_max, y_min, y_max]) - plt.grid(True) - plt.tight_layout() - plt.savefig( - "OPT_plots/convergence_evolution." + save_format, format=save_format - ) - plt.close() - # -------------------------------- - - # Plots 3 : Domimant constraints parameters - # ------------------------------------------ - # This graph will show the n_ploted_const constrains that dominates sqsumsq at the last iteration, their quadratic sum and sqsumsq - # --- - if n_ploted_const > n_constraints: - print( - "Impossible to print {} constraints as only {} were used for the run".format( - n_ploted_const, n_constraints - ) - ) - print(" -> Plotting {} constraints instead".format(n_constraints)) - n_ploted_const = n_constraints - - if plot_const_dom: - # Avoiding trying to plot more constraints than the total number of constraints used for the run - n_ploted_const = min(n_ploted_const, n_constraints) - - # Finiding n_ploted_const constraints that has the largest value at the last point - dominant_constaints_indexes = [] - last_constraints = data[-1][4 : n_constraints + 4] - last_constraints = [ - abs(ii_last_constraint) for ii_last_constraint in last_constraints - ] - - for ii in range(0, n_ploted_const): - ii_dominant_constaints = 
int(last_constraints.index(max(last_constraints))) - dominant_constaints_indexes.append(ii_dominant_constaints) - last_constraints[ii_dominant_constaints] = -1.0 - - # Dominant constraints quadratic sum calculation - dominant_quad_sum = [] - for ii_vmcon in range(0, n_vmcon): - dominant_quad_sum.append(float(0)) - for ii in range(0, n_ploted_const): - dominant_quad_sum[ii_vmcon] += ( - constraints[dominant_constaints_indexes[ii]][ii_vmcon] - * constraints[dominant_constaints_indexes[ii]][ii_vmcon] - ) - dominant_quad_sum[ii_vmcon] = pow(dominant_quad_sum[ii_vmcon], 0.5) - - # Plot option settings - if n_ploted_const > len(plot_opt): - for ii in range(0, n_ploted_const - len(plot_opt)): - plot_opt.append("m:") - - # Ranges - x_min = 0 - x_max = n_vmcon + 2 - y_min = 1e-8 - y_max = 1.0 - - # Plot - for ii in range(0, n_ploted_const): - constraint_name = p_dicts["DICT_ICC_FULL"][ - str(constraints_indexes[dominant_constaints_indexes[ii]]) - ]["name"] - if n_ploted_const > 5: - constraint_name = constraint_name.replace( - "Central solenoid", "CS" - ).replace("central solenoid", "CS") - constraint_name = constraint_name[:17] - - plt.plot( - plot_indexes, - constraints[dominant_constaints_indexes[ii]], - plot_opt[ii], - label=str(ii + 1) - + ": " - + constraint_name - + " (" - + str(constraints_indexes[dominant_constaints_indexes[ii]]) - + ")", - ) - plt.plot( - plot_indexes, dominant_quad_sum, "k--", label="dominant const quad sum " - ) - plt.plot(plot_indexes, constraints_quad_sum, "k:", label="total const quad sum") - plt.plot([x_min, x_max], [tolerance, tolerance], "k-") - - # Actions if the legend gets too big - if n_ploted_const > 5: - x_max *= 2 - y_min = 1e-12 - - # Cosmetics - plt.legend(loc="best") - plt.yscale("log") - plt.xlabel("$VMCON$ iteration", fontsize=axis_font_size) - plt.ylabel("Constraint", fontsize=axis_font_size) - plt.axis([x_min, x_max, y_min, y_max]) - plt.grid(True) - plt.tight_layout() - plt.savefig( - "OPT_plots/constraints_evolution." 
+ save_format, format=save_format - ) - # plt.show() - plt.close() - # ------------------------------------------ - - # Plot 4 : Specific constraint - # ---------------------------- - # Check if the constraint is used - if plot_const_spe and (constraint_selection not in constraints_indexes): - print( - "The constraint {} was not used for this PROCESS run".format( - constraint_selection - ) - ) - print(" -> Try with one of these {}".format(constraints_indexes)) - - elif plot_const_spe: - # Constraint selection - ii_constraint = constraints_indexes.index(constraint_selection) - constraint_name = p_dicts["DICT_ICC_FULL"][str(constraint_selection)]["name"] - - # Ranges - x_min = 0 - x_max = n_vmcon + 2 - y_min = 1e-12 - y_max = 1.0 - - # Plot - plt.plot(plot_indexes, constraints[ii_constraint], "r-", label=constraint_name) - plt.plot(plot_indexes, constraints_quad_sum, "k:", label="total const quad sum") - plt.plot([x_min, x_max], [tolerance, tolerance], "k-") - - # Cosmetics - plt.legend(loc="best") - plt.yscale("log") - plt.ylabel("Constraints", fontsize=axis_font_size) - plt.xlabel("$VMCON$ iteration", fontsize=axis_font_size) - plt.axis([x_min, x_max, y_min, y_max]) - plt.grid(True) - plt.tight_layout() - plt.savefig( - "OPT_plots/" - + str(p_dicts["DICT_ICC_FULL"][str(constraint_selection)]["name"]) - + "_dominant_constraints_evolution." 
- + save_format, - format=save_format, - ) - # plt.show() - plt.close() - # ---------------------------- - - # Plot 5 : Major variable evolution - # --------------------------------- - if plot_var_dom and n_var_plots > n_variables: - print( - "Impossible to print {} variables as only {} were used for the run".format( - n_var_plots, n_variables - ) - ) - print(" -> Plotting {} variables instead".format(n_variables)) - n_var_plots = n_variables - - if plot_var_dom: - if n_var_plots > len(plot_opt): - for ii in range(0, n_var_plots - len(plot_opt)): - plot_opt.append("m:") - - # Select the two variables that vary the most and plot them - ranked_variables_index = [] - variables_ranges = [max(variable) - min(variable) for variable in variables] - - for ii in range(0, n_variables): - ranked_variables_index.append(variables_ranges.index(max(variables_ranges))) - variables_ranges[ranked_variables_index[ii]] = -1.0 - - # Plot - for ii in range(0, n_var_plots): - leg_label = "{} : {} ({})".format( - ii + 1, - p_dicts["DICT_IXC_SIMPLE"][ - str(variables_indexes[ranked_variables_index[ii]]) - ], - variables_indexes[ranked_variables_index[ii]], - ) - plt.plot( - plot_indexes, - variables[ranked_variables_index[ii]], - plot_opt[ii], - label=leg_label, - ) - - # Action if the legend gets too big ... - if n_var_plots > 6: - plt.xlim([0.0, int(1.8 * float(n_vmcon))]) - - # Cosmetics - plt.legend(loc="best") - plt.xlabel("$VMCON$ iteration", fontsize=axis_font_size) - plt.ylabel("variable", fontsize=axis_font_size) - plt.grid(True) - plt.tight_layout() - plt.savefig( - "OPT_plots/dominant_variables_evolution." 
+ save_format, format=save_format - ) - # plt.show() - plt.close() - # --------------------------------- - - # Plot 6 : Specific variable pair path - # ------------------------------------ - # Check if the variables are used in the considered PROCESS run - if plot_var_spe_pair and (x_variable_selection not in variables_indexes): - print( - "The PROCESS variable {} used of the X axis is not used for the run".format( - x_variable_selection - ) - ) - print(" -> Try with one of these {}".format(variables_indexes)) - elif plot_var_spe_pair and (y_variable_selection not in variables_indexes): - print( - "The PROCESS variable {} used of the X axis is not used for the run".format( - y_variable_selection - ) - ) - print(" -> Try with one of these {}".format(variables_indexes)) - - elif plot_var_spe_pair: - # Retrieving the constraint index - ii_x_variable = variables_indexes.index(x_variable_selection) - ii_y_variable = variables_indexes.index(y_variable_selection) - - # Plot - ln_convergence_parameter = [ - np.log10(conv_param) for conv_param in convergence_parameter - ] # the ln of the convergence parameter is taken for visibility isues - scat = plt.scatter( - variables[ii_x_variable], - variables[ii_y_variable], - c=ln_convergence_parameter, - ) - plt.plot(variables[ii_x_variable], variables[ii_y_variable], "k-") - plt.grid(True) - plt.xlabel( - p_dicts["DICT_IXC_SIMPLE"][str(x_variable_selection)], - fontsize=axis_font_size, - ) - plt.ylabel( - p_dicts["DICT_IXC_SIMPLE"][str(y_variable_selection)], - fontsize=axis_font_size, - ) - plot_colorbar = plt.colorbar(scat) - plot_colorbar.set_label("$ln_{10}$(final conv)", size=axis_font_size) - plt.tight_layout() - plt.savefig( - "OPT_plots/var" - + str(x_variable_selection) - + "_vs_var" - + str(y_variable_selection) - + "." 
- + save_format, - format=save_format, - ) - plt.close() - # ------------------------------------ - - # Plot 7 : plot all each inputs variables evolution - # -------------------------------------------------- - # Creating the folder containing the variables plots - if plot_var_all: - if not os.path.isdir("OPT_plots/All_Var"): - os.mkdir("OPT_plots/All_Var") - - ii = 0 - for variable in variables: - - variable_name = p_dicts["DICT_IXC_SIMPLE"][str(variables_indexes[ii])] - variable_name = variable_name.replace("(", "").replace(")", "") - - plt.plot(plot_indexes, variable, "k-") - plt.xlabel("$VMCON$ iteration", fontsize=axis_font_size) - plt.ylabel(variable_name, fontsize=axis_font_size) - plt.grid(True) - plt.tight_layout() - plt.savefig( - "OPT_plots/All_Var/{}_evolution.{}".format(variable_name, save_format), - format=save_format, - ) - plt.close() - ii += 1 - # -------------------------------------------------- - - # Plot 8 : plot all each inputs constraint evolution - # --------------------------------------------------- - # Creating the folder containing the variables plots - if plot_const_all: - if not os.path.isdir("OPT_plots/All_Const"): - os.mkdir("OPT_plots/All_Const") - - # Ranges - x_min = 0 - x_max = n_vmcon + 2 - y_min = 1e-12 - y_max = 1.0 - - ii = 0 - for constraint in constraints: - - constraint_name = p_dicts["DICT_ICC_FULL"][str(constraints_indexes[ii])][ - "name" - ] - plt.plot(plot_indexes, constraint, "r-", label=constraint_name) - plt.plot(plot_indexes, constraints_quad_sum, "k--", label="const quad sum") - - # Cosmetics - plt.xlabel("$VMCON$ iteration", fontsize=axis_font_size) - plt.ylabel(constraint_name, fontsize=axis_font_size) - plt.legend(loc="best") - plt.yscale("log") - plt.axis([x_min, x_max, y_min, y_max]) - plt.grid(True) - plt.tight_layout() - - constraint_name_f = ( - constraint_name.replace(" ", "_").replace(")", "").replace("(", "") - ) - plt.savefig( - "OPT_plots/All_Const/{}_evolution.{}".format( - constraint_name_f, 
save_format - ), - format=save_format, - ) - plt.close() - ii += 1 - # --------------------------------------------------- diff --git a/utilities/plot_profiles.py b/utilities/plot_profiles.py deleted file mode 100644 index ece2bcc6..00000000 --- a/utilities/plot_profiles.py +++ /dev/null @@ -1,177 +0,0 @@ -#!/usr/bin/env python -""" - - Plots profiles from PROCESS for a number of MFILEs - - James Morris - 15/02/2019 - CCFE - -""" - -import argparse -import process.io.mfile as mf -import matplotlib -import matplotlib.pyplot as plt -import matplotlib.backends.backend_pdf as bpdf -import numpy as np - -matplotlib.rcParams["figure.max_open_warning"] = 40 - - -def plot_nprofile(prof, mfdat, scan): - """Function to plot density profile - Arguments: - prof --> axis object to add plot to - mfdat --> MFILE data object - scan --> scan number to plot - """ - xmin = 0 - xmax = 1 - ymin = 0 - ymax = 20 - prof.set_ylim([ymin, ymax]) - prof.set_xlim([xmin, xmax]) - prof.set_autoscaley_on(False) - prof.set_xlabel("r/a") - prof.set_ylabel("ne / 1e19 m-3") - prof.set_title("Density profile") - - ipedestal = mfdat.data["ipedestal"].get_scan(scan) - neped = mfdat.data["neped"].get_scan(scan) - nesep = mfdat.data["nesep"].get_scan(scan) - rhopedn = mfdat.data["rhopedn"].get_scan(scan) - alphan = mfdat.data["alphan"].get_scan(scan) - ne0 = mfdat.data["ne0"].get_scan(scan) - - if ipedestal == 1: - rhocore1 = np.linspace(0, 0.95 * rhopedn) - rhocore2 = np.linspace(0.95 * rhopedn, rhopedn) - rhocore = np.append(rhocore1, rhocore2) - ncore = neped + (ne0 - neped) * (1 - rhocore**2 / rhopedn**2) ** alphan - - rhosep = np.linspace(rhopedn, 1) - nsep = nesep + (neped - nesep) * (1 - rhosep) / (1 - min(0.9999, rhopedn)) - - rho = np.append(rhocore, rhosep) - ne = np.append(ncore, nsep) - else: - rho1 = np.linspace(0, 0.95) - rho2 = np.linspace(0.95, 1) - rho = np.append(rho1, rho2) - ne = ne0 * (1 - rho**2) ** alphan - ne = ne / 1e19 - prof.plot(rho, ne, label=mfdat.filename) - - -def 
plot_tprofile(prof, mfdat, scan): - """Function to plot temperature profile - Arguments: - prof --> axis object to add plot to - mfdat --> MFILE data object - scan --> scan number to plot - """ - xmin = 0 - xmax = 1 - ymin = 0 - ymax = 40 - prof.set_ylim([ymin, ymax]) - prof.set_xlim([xmin, xmax]) - prof.set_autoscaley_on(False) - prof.set_xlabel("r/a") - prof.set_ylabel("Te / keV") - prof.set_title("Temperature profile") - - ipedestal = mfdat.data["ipedestal"].get_scan(scan) - rhopedt = mfdat.data["rhopedt"].get_scan(scan) - tbeta = mfdat.data["tbeta"].get_scan(scan) - teped = mfdat.data["teped"].get_scan(scan) - tesep = mfdat.data["tesep"].get_scan(scan) - alphat = mfdat.data["alphat"].get_scan(scan) - te0 = mfdat.data["te0"].get_scan(scan) - - if ipedestal == 1: - rhocore1 = np.linspace(0, 0.9 * rhopedt) - rhocore2 = np.linspace(0.9 * rhopedt, rhopedt) - rhocore = np.append(rhocore1, rhocore2) - tcore = teped + (te0 - teped) * (1 - (rhocore / rhopedt) ** tbeta) ** alphat - - rhosep = np.linspace(rhopedt, 1) - tsep = tesep + (teped - tesep) * (1 - rhosep) / (1 - min(0.9999, rhopedt)) - - rho = np.append(rhocore, rhosep) - te = np.append(tcore, tsep) - else: - rho = np.linspace(0, 1) - te = te0 * (1 - rho**2) ** alphat - prof.plot(rho, te, label=mfdat.filename) - - -if __name__ == "__main__": - - # Setup command line arguments - parser = argparse.ArgumentParser( - description="Produces plots of density and temperature profiles, using MFILE(s). 
" - "For info contact james.morris2@ukaea.uk" - ) - - parser.add_argument( - "-f", - metavar="files", - type=str, - nargs="+", - default="MFILE.DAT", - help='specify input/output file paths as a list "-f ../file1 ../file2"', - ) - - parser.add_argument( - "-s", "--save", help="save plot as well as showing figure", action="store_true" - ) - - parser.add_argument("-n", type=int, help="Which scan to plot?") - - parser.add_argument( - "-o", - metavar="output_name", - type=str, - default="profiles.pdf", - help="name of output pdf", - ) - - args = parser.parse_args() - - if args.n: - scan = args.n - else: - scan = -1 - - # Setup plot area - fig = plt.figure(figsize=(12, 10), dpi=80) - - # Density plot - plot_1 = fig.add_subplot(211, aspect=1 / 20) - - # Temperature plot - plot_2 = fig.add_subplot(212, aspect=1 / 40) - - # read MFILE - if args.f != "MFILE.DAT": - for item in args.f: - m_file = mf.MFile(item) - plot_nprofile(plot_1, m_file, scan) - plot_tprofile(plot_2, m_file, scan) - - else: - m_file = mf.MFile("MFILE.DAT") - m_file = mf.MFile(item) - plot_nprofile(plot_1, m_file, scan) - plot_tprofile(plot_2, m_file, scan) - - plot_1.legend(loc="upper left", bbox_to_anchor=(1, 1)) - plot_2.legend(loc="upper left", bbox_to_anchor=(1, 1)) - - if args.save: - with bpdf.PdfPages(args.o) as pdf: - pdf.savefig(fig) - - plt.show(fig) diff --git a/utilities/plot_stress_tf.py b/utilities/plot_stress_tf.py index 9384b7d6..6a609c02 100644 --- a/utilities/plot_stress_tf.py +++ b/utilities/plot_stress_tf.py @@ -22,7 +22,6 @@ if __name__ == "__main__": - # PARSING USER PARAMETERS # please execute 'python plot_stress_tf.py -h' for input information # Option definition @@ -133,7 +132,6 @@ n_points = len(sig_file_data["Radius (m)"]) n_layers = int(n_points / n_radial_array_layer) for ii in range(n_layers): - # Full vector radius.append(list()) radial_stress.append(list()) @@ -458,7 +456,6 @@ # PLOT 1 : Stress summary # ------------------------ if plot_sig: - for ii in range(0, 
n_layers): plt.plot( radius[ii], @@ -573,7 +570,6 @@ # PLOT 2 : Smeared stress summary # ------------------------ if plot_sm_sig: - for ii in range(n_layers): plt.plot( radius[ii], @@ -680,7 +676,6 @@ # PLOT 3 : Strain summary # ------------------------ if plot_strain and len(sig_file_data) > 15: - for ii in range(n_layers): plt.plot( radius[ii], diff --git a/utilities/popcon.py b/utilities/popcon.py deleted file mode 100755 index 99f77959..00000000 --- a/utilities/popcon.py +++ /dev/null @@ -1,494 +0,0 @@ -#!/usr/bin/env python -""" - -POPCON (Plasma OPeration CONtour) Plot - (Power OutPut CONtour) Plot - -Stuart Muldrew (stuart.muldrew@ukaea.uk) -Updated version of code by PJ Knight and TN Todd -12/09/2018 - -Outputs POPCON plot for given input parameters. - -For reference, density and temperature profiles are of the form: - -n(r) = n0 (1 - (r/a)**2)**alphan (similarly temperature with index alphat) - -Line averaged value: nbar = n0 / sqrt(1.0 + alphan) - -Volume averaged value: = n0 / (1.0 + alphan) - or: = nbar / sqrt(1.0 + alphan) - - -History: -- Originally written by TNT in late 1980s (BBC Microcomputer / BBC Basic / primitive graphics) -- Ported to IDL by PJK in 2011, added new routines and compared with W Han's cut-down PROCESS -- Ported to Python in 2018 by SIM and added MFILE reading - - -""" -# Imported libraries -import argparse -import process.io.mfile as mf -import numpy as np -import matplotlib.pyplot as plt -import sys - - -# Functions -def ohmic(): - """Ohmic Power""" - Foh = ( - plascur - * plascur - / 16.0 - * R - / (a * a * K) - * Zeff - * (1.0 + 1.5 * alphat) - / fnc - / Trat**1.5 - ) - Poh = Foh / T0**1.5 - - return Poh - - -def brem_tnt(): - """Bremsstrahlung - TNT model""" - fPb = Zeff * np.sqrt(Trat) * fv / (1.0 + 2.0 * alphan + 0.5 * alphat) / 1000.0 - Pbf = n0 * n0 * fPb - Pb = Pbf * np.sqrt(T0) - - return Pb - - -def brem_process(): - """Bremsstrahlung - Process Model""" - kappa95 = 0.96 * K # RUN66 ratio of kappa95 to kappaa - vol = 
20.0 * fv - - dene = 1.0e19 * nbar / np.sqrt(1.0 + alphan) - den20 = dene / 1.0e20 - - pcoef = (1.0 + alphan) * (1.0 + alphat) / (1.0 + alphan + alphat) - ten = T0 / (1.0 + alphat) * pcoef - t10 = ten / 10.0 - - fbhe = 0.9 - impc = 1.0 - fbc = 0.52 - impo = 1.0 - fbo = 0.52 - impfe = 1.0 - fbfe = 0.35 - cfe0 = 0.35415e-3 - - ralpne = 0.1 - rncne = impc * (0.009 + 0.006 * (7.0e19 / dene) ** 2.6) - rnone = impo * 0.001 - rnfene = impfe * (0.0005 * (7.0e19 / dene) ** 2.3 + cfe0) - - radexp = ( - (1.0 + alphan) ** 1.5 - * np.sqrt(1.0 + alphan + alphat) - / (1.0 + 2.0 * alphan + 0.5 * alphat) - ) - - deni = dene * ( - 1.0 - 2.0 * ralpne - (6.0 * rncne + 8.0 * rnone + 26.0 * rnfene) - ) # No beam - - vr = R * (a * (1.0 + kappa95) / 2.0) ** 2 / (58.652 * vol) - phe = 65.8 * ralpne * (dene / 7.0e19) ** 1.5 * vr - pc = 1120.0 * rncne * (dene / 7.0e19) ** 1.5 * vr - po = 2240.0 * rnone * (dene / 7.0e19) ** 1.5 * vr - pfe = 44800.0 * rnfene * (dene / 7.0e19) ** 2.5 * vr - - pbremdt = 0.016 * radexp * den20 * den20 * (deni / dene) * np.sqrt(t10) # D-T Brem - pbremz = fbhe * phe + fbc * pc + fbo * po + fbfe * pfe # High Z Bremsstrahlung - - return (pbremz + pbremdt) * vol - - -def sync_tnt(): - """Synchrotron - TNT model""" - Psf1 = 18.0 * a / R / np.sqrt(Trat) - fPs = ( - 20.0 - * fv - * 4.1e-7 - * (Trat * B) ** 2.5 - * np.sqrt((1.0 - refl) / 10.0 / a / np.sqrt(K)) - ) - Psf = fPs * np.sqrt(n0) - Psf5 = fion / 250.0 / (B * B) / (1.0 + 2.0 * alphan + 2.0 * alphat) - s1 = Psf * T0**2.5 * np.sqrt(1.0 + Psf1 / np.sqrt(T0)) - s5 = n0 * T0 * Psf5 - Ps = s1 * (1.0 / fp + s4 - s5) - - return Ps - - -def sync_process(): - """Synchrotron - Process Model""" - pcoef = (1.0 + alphan) * (1.0 + alphat) / (1.0 + alphan + alphat) - ten = T0 / (1.0 + alphat) * pcoef / 10.0 - den20 = nbar / np.sqrt(1.0 + alphan) / 10.0 - xfact = 5.7 * a / R / np.sqrt(ten) - - Ps = ( - 1.3e-4 - * (B * ten) ** 2.5 - * np.sqrt(1.0 + xfact) - * np.sqrt(den20 / a) - * np.sqrt(1.0 - refl) - * fv - * 20.0 - ) - # MW/m3 
to MW conversion - - return Ps - - -def sync_albajar_fidone(): - """Synchroton - Albajar and Fidone Model""" - tbet = 2.0 # tbet is betaT in Albajar, not to be confused with plasma beta - rpow = 0.62 # rpow is the (1-Rsyn) power dependence based on plasma shape - - de2o = n0 / 10.0 - teo = T0 * Trat - pao = 6040.0 * (a * de2o) / B - gfun = 0.93 * (1.0 + 0.85 * np.exp(-0.82 * R / a)) - kfun = (alphan + 3.87 * alphat + 1.46) ** (-0.79) - kfun = kfun * (1.98 + alphat) ** 1.36 * tbet**2.14 - kfun = kfun * (tbet**1.53 + 1.87 * alphat - 0.16) ** (-1.33) - dum = 1.0 + 0.12 * (teo / (pao**0.41)) * (1.0 - refl) ** 0.41 - - dum = dum ** (-1.51) # Very high T modification, from Fidone - - Ps = 3.84e-8 * (1.0 - refl) ** rpow * R * a**1.38 - Ps = Ps * K**0.79 * B**2.62 * de2o**0.38 - Ps = Ps * teo * (16.0 + teo) ** 2.61 * dum * gfun * kfun - - return Ps - - -def fusion(): - """Charged particle fusion power""" - if fuel2 == "T": - FPalf = 1.52e-6 * fv * n0 * n0 * ((Zimp - Zeff) / (Zimp - 1.0)) ** 2 - g = 23.9 - 17.87 * T0**0.04 - GGG = 23.9 - (17.37 + 0.7 * np.log(T0)) * T0**0.04 - else: - FPalf = 1.16e-7 * fv * n0 * n0 * ((Zimp - Zeff) / (1.8 * Zimp - 2.6)) ** 2 - g = 14.81 - 10.0 * T0**0.03 - GGG = 14.81 - (9.79 + 0.3 * np.log(T0)) * T0**0.03 - - Palf = FPalf * (T0**g) / (1.0 + 2.0 * alphan + GGG * alphat) - - return Palf - - -def transport(): - """ - Calculate transport power given the stored plasma thermal energy - and a chosen scaling law for the confinement time - - etherm = plasma thermal energy in MJ - nbar = line-averaged density in 1.0E19 /m3 - afuel = average fuel mass (amu) - - ptransp = etherm/taue, but taue = ftaue*(Ptransp**pheat_index), - hence the coding below - """ - - if scaling_law == 0: # IPB98(y,2) scaling - etherm = 0.4 * C1 / (B * B) * n0 / fp * fion * T0 - - # ftaue is the confinement time scaling, but excluding the pheat term - ftaue = ( - hfact - * 0.0562 - * plascur**0.93 - * B**0.15 - * nbar**0.41 - * R**1.39 - * a ** (0.58) - * K**0.78 - * 
afuel**0.19 - ) - - pheat_index = -0.69 - - Ptransp = (etherm / ftaue) ** (1.0 / (1.0 + pheat_index)) - - else: - print("scaling_law =", scaling_law) - print("No such scaling law!") - sys.exit() - - return Ptransp - - -if __name__ == "__main__": - - # Setup command line arguments - parser = argparse.ArgumentParser( - description="Displays a POPCON plot for input MFILE. " - "For more information contact Stuart.Muldrew@ukaea.uk or Peter.Knight@ukaea.uk" - ) - - parser.add_argument( - "-f", - metavar="MFILE", - type=str, - default="MFILE.DAT", - help="specify the MFILE (default=MFILE.DAT)", - ) - - parser.add_argument( - "-s", "--save", help="save as well as displaying figure", action="store_true" - ) - - parser.add_argument( - "-t", - "--test", - help="Test mode: ignores MFILE and runs with R Kembleton's test values", - action="store_true", - ) - - parser.add_argument( - "-x", - type=int, - default="1", - help="Temperature (x-axis): (0) Central (1) Volume (default=1)", - ) - - parser.add_argument( - "-y", - type=int, - default="1", - help="Density (y-axis): (0) Central (1) Volume (2) Line (default=1)", - ) - - parser.add_argument( - "-z", - type=int, - default="0", - help="Power (z-axis): (0) Aux for balance (1) Net (2) Fusion (default=0)", - ) - - parser.add_argument( - "-zimp", type=float, default="18.0", help="Impurity charge (default=18 Ar)" - ) - - args = parser.parse_args() - - # Option for Test mode that ignores MFILE and runs with set parameters - if args.test: - # Test parameters from Richard Kembleton's DEMO1 (01/10/2013) - a = 2.2 # Minor radius (m) - R = 9.0 # Major radius (m) - K = 1.8 # Elongation - plascur = 12.0 # Plasma current (MA) - refl = 0.6 # Wall reflection coeff. 
- B = 7.33 # Toroidal magnetic field - alphan = 0.37 # Density profile index - alphat = 1.63 # Temperature profile index - Zimp = 18.0 # Impurity charge - Zeff = 3.48 # Zeff - Trat = 1.0 # Ratio of Te/Ti - Paux = 70.0 # Auxiliary power (MW) - fuel2 = "T" # T or He3 species mix with D - hfact = 1.4 # Confinement time H factor - j0fudge = 0.367 # Only used for Ohmic power calculation - RadModel = 2 # Radiation Model: (0) TNT (1) Process (2) Process + Albajar and Fidone Sync Model - else: - m_file = mf.MFile(args.f) - a = m_file.data["rminor"].get_scan(-1) # Minor radius (m) - R = m_file.data["rmajor"].get_scan(-1) # Major radius (m) - K = m_file.data["kappaa"].get_scan(-1) # Elongation - plascur = m_file.data["plascur/1d6"].get_scan(-1) # Plasma current (MA) - refl = m_file.data["ssync"].get_scan(-1) # Wall reflection coeff. - B = m_file.data["bt"].get_scan(-1) # Toroidal magnetic field - alphan = m_file.data["alphan"].get_scan(-1) # Density profile index - alphat = m_file.data["alphat"].get_scan(-1) # Temperature profile index - Zeff = m_file.data["zeff"].get_scan(-1) # Zeff - te = m_file.data["te"].get_scan(-1) # Electron temperature - ti = m_file.data["ti"].get_scan(-1) # Ion temperature - Paux = m_file.data["pinjmw"].get_scan(-1) # Auxiliary power (MW) - hfact = m_file.data["hfact"].get_scan(-1) # Confinement time H factor - - Zimp = args.zimp # Impurity charge - - Trat = te / ti # Ratio of Te/Ti - fuel2 = "T" # T or He3 species mix with D - j0fudge = 0.367 # Only used for Ohmic power calculation - RadModel = 2 # Radiation Model: (0) TNT (1) Process (2) Process + Albajar and Fidone Sync Model - - # Array Size - nmin = 2.0976 - nmax = 25.0 - tmin = 5.0 - tmax = 40.0 - - tpoints = 120 - npoints = 100 - - # Plot - output = "popcon.pdf" - # Temperature (x-axis): (0) Central (1) Volume - temp = args.x - # Density (y-axis): (0) Central (1) Volume (2) Line - dens = args.y - # Power (z-axis): (0) Aux for balance (1) Net - Pplot = args.z - - # Scaling (Only one option) - 
scaling_law = 0 - scaling_name = "IPB98(y,2)" - - ##################################### - - # Main Program - - # Imports - - if fuel2 == "He3": - if Zeff == 1.445: - Zeff = 1.445 - - # Critical density, I/N scaling - - ncrit = 6.67 * plascur / (np.pi * a * a * K) - - if nmax == 0.0: - nmax = ncrit - - # Line-averaged electron density, 1.0E19 m-3 - nbar = nmin + np.arange(npoints) / (npoints - 1) * (nmax - nmin) - nbar = np.flip(nbar, 0) - - # Central ion temperature, keV - T0 = tmin + np.arange(tpoints) / (tpoints - 1) * (tmax - tmin) - - # Meshgrid - T0, nbar = np.meshgrid(T0, nbar) - - # Perform main calculation at the given line-averaged density, central temperature - n0 = nbar * np.sqrt(1.0 + alphan) # Central density - - fnc = (1.0 - np.sqrt(2.63 * a / R / (5.0 + alphat))) ** 2 - j0 = j0fudge * plascur / (np.pi * a * a * K) * (1.0 + 1.5 * alphat) / fnc - - fv = a * a * R * K # Volume factor - C1 = B * B * fv / 8.49 - fp = 1.0 + alphan + alphat # Profile factor - s3 = fp + 0.72 * (1.0 + 3.0 * alphat) - s4 = ( - 4.16 - * ((1.0 + 1.5 * alphat) * plascur / 5.0 / a / B / K) ** 2 - / (2.0 + 4.5 * alphat) - / s3 - ) - - if fuel2 == "T": - fion = Trat + (1.0 + Zimp - Zeff) / Zimp - nrat = (Zeff - 1.0) / Zimp / (Zimp - Zeff) - afuel = (2.5 + 2.0 * nrat) / (1.0 + nrat) - else: - fion = Trat + (Zimp * (Zimp - Zeff) + 1.8 * Zeff - 2.6) / Zimp / ( - 1.8 * Zimp - 2.6 - ) - nrat = (1.8 * Zeff - 2.6) / Zimp / (Zimp - Zeff) - afuel = 2.0 - 0.2 / (1.0 + nrat) - - # Power Calculations - Pohm = ohmic() - - if RadModel == 0: - Pbrem = brem_tnt() - Psync = sync_tnt() - elif RadModel == 1: - Pbrem = brem_process() - Psync = sync_process() - elif RadModel == 2: - Pbrem = brem_process() - Psync = sync_albajar_fidone() - - Palf = fusion() - Ptransp = transport() - - # Total (continuum) radiation power - Prad = Pbrem + Psync - # Total heating power (excluding Paux) - Pheat = Palf + Pohm - - # Auxiliary power required for power balance (MW) - Pbal = -(Pheat - Ptransp - Prad) - - # Net 
power output (MW) - Pnet = Pheat + Paux - Ptransp - Prad - - # - if temp == 0: - xtitle = "Central ion temperature / keV" - xplot = T0[0, :] - elif temp == 1: - xtitle = "Volume averaged ion temperature / keV" - xplot = T0[0, :] / (1.0 + alphat) - - if dens == 0: - ytitle = r"Central electron density / $10^{19} \mathrm{m}^3$" - yplot = nbar[:, 0] * np.sqrt(1.0 + alphan) - elif dens == 1: - ytitle = r"Volume averaged electron density / $10^{19} \mathrm{m}^3$" - yplot = nbar[:, 0] / np.sqrt(1.0 + alphan) - elif dens == 2: - ytitle = r"Line-averaged electron density / $10^{19} \mathrm{m}^3$" - yplot = nbar[:, 0] - - # Plotting - if Pplot == 0: - plt.contourf( - Pbal, - 60, - extent=[np.min(xplot), np.max(xplot), np.max(yplot), np.min(yplot)], - cmap="jet", - ) - plt.colorbar(label="$P_{aux}$ / MW") - plt.contour( - Pbal, - levels=[0.0], - extent=[np.min(xplot), np.max(xplot), np.max(yplot), np.min(yplot)], - colors="k", - ) - elif Pplot == 1: - plt.contourf( - Pnet, - 60, - extent=[np.min(xplot), np.max(xplot), np.max(yplot), np.min(yplot)], - cmap="jet", - ) - plt.colorbar(label="$P_{net}$ / MW") - plt.contour( - Pnet, - levels=[0.0], - extent=[np.min(xplot), np.max(xplot), np.max(yplot), np.min(yplot)], - colors="k", - ) - elif Pplot == 2: - plt.contourf( - 5.0 * Palf, - 60, - extent=[np.min(xplot), np.max(xplot), np.max(yplot), np.min(yplot)], - cmap="jet", - ) - plt.colorbar(label="$P_{fus}$ / MW") - - plt.xlabel(xtitle) - plt.ylabel(ytitle) - if args.save: - plt.savefig(output) - plt.show() diff --git a/utilities/process_io_lib/NCDFfromMFILE.py b/utilities/process_io_lib/NCDFfromMFILE.py deleted file mode 100755 index 3382bc81..00000000 --- a/utilities/process_io_lib/NCDFfromMFILE.py +++ /dev/null @@ -1,473 +0,0 @@ -""" -Author: Steven Torrisi, University of Rochester - -Data which is stored in the NCDF file from an MFIlE library, -with it's title in the NCDF file listed in parentheses -(Asterisk next to data which is optional and only stored if available): - 
-Title (title)* -Author (author)* -Description (description)* - -Dimensions (dimensions) - -Name - -Coordinates [From here you can infer # of steps, upper bound, lower bound] - -Variables (variables) - -The dimensions which they are plotted along [same as the ones above] - -ifail is always included as one of these by default, so there is at least - one variable of interest -""" - -from getpass import getuser -from process.io.mfile import MFile -from numpy import array, transpose -from netCDF4 import Dataset -from datetime import datetime -from process.io.configuration import Config - - -class NCDFconverter(Config): - """ - Contains all of the necessary methods to convert MFILEs from a scan - to a NetCDF file. - """ - - def __init__(self, configfilename="ndscan.json", verbosity=1): - """ - Specifies which configuration file to read. - Arguments: - configfile---> Specifies which configuration file to read. - verbosity----> If set to 1, will output some information about - the results. If set to 2, will output results as well as process. - - Class variables: - currentstep------------->Array to keep track of the current step - along the coordinates - variablecollector-------> List of lists which collects the values - for each variable of interest - variablepointers--------> List of strings; the names of each - variable of interest - output_vars-------------> List of strings; the names of each - variable of interest - stepstring--------------> String which represents the current step - along the coordinates. - mfiledir----------------> Directory name in which the MFILES are - stored. 
- - """ - super().__init__(configfilename) - - self.currentstep = [] - self.variablecollector = [] - self.variablepointers = [] - self.steptuple = () - self.axestuple = () - self.mfiledir = "MFILES" - self.stepstring = "" - self.verbosity = verbosity - - self.configfilename = configfilename - self.output_vars = self.get("output_vars", default=[]) - if len(self.output_vars) == 0: - print( - "No output variables specified in config file.", - self.configfilename, - "\nNo NetCDF file created!", - ) - exit() - - self.time = str(datetime.now()) - self.time = self.time.replace(" ", "-") - self.time = self.time[0:16] - - self.title = self.get("title", default="NdscanOutput") - self.author = self.get("author", default=getuser()) - # self.author = self.get("author", default="No author given") - self.description = self.get("description", default="No description given") - - self.axes = self.get("axes", default=[]) - - def convert_mfilelibrary(self): - """ - The primary method which begins the conversion process on the config - file named configname. - This carries out the whole process. - """ - - # Determine roughly but dynamically what level of compression to use. - scanvars = [] - scansteps = [] - for axis in self.axes: - scanvars.append(axis["varname"]) - - if type(axis["steps"]) is list: - scansteps.append(len(axis["steps"])) - else: - scansteps.append(axis["steps"]) - - self.steptuple = tuple(scansteps) - self.axestuple = tuple(scanvars) - - compressionlevel = self.determine_compression() - - ncfile = self.create_ncfile() - - # A list of lists which will later be converted into n dimensional - # arrays for each variable. - # The first list corresponds to the first variable, and so on. - self.create_dimensions_and_var_slots(ncfile) - # The first case is done independent of the automation. 
- - self.do_first_extraction() - - self.start_automated_extraction() - - ncfile.scanvars = ",".join(self.axestuple) - ncfile.scansteps = self.steptuple - - self.store_variables_from_collector(ncfile, compressionlevel) - - self.print_ncfile_variables(ncfile) - - def increment_current_step(self): - """ - Increments the current step; compares it against self.steptuple, and - when an individual axis as at the end of it's bound, rounds up the - next axis. - - When an 'overflow' occurs along an axis, rounds up. - For example- If 0th dimension has n steps, when the current step's 0th - dimension reaches the (n+1)th step, - increases the 1st dimension step by one, and take the 0th dimension - back down to the 1st step. - Then if the 1st dimension overflowed, take that back down to the 1st - step and tick up the 2nd dimension-- etc... - - Modifies: - - self.currentstep - - WILL END PROGRAM IF: - - The currentstep counter overflows against steptuple. This shouldn't - happen. - - """ - - self.currentstep[0] += 1 - if self.verbosity > 1: - print("Current step is now ", self.currentstep) - - # Checks to see if increasing the current step by 1 resulted in a - # dimension overflow - # if so- reset it to 0, and increment the next dimension by 1. - for i in range(len(self.currentstep)): - # Check to see if we are evaluating the last step. If so, an - # overflow must be treated differently. - if self.currentstep[-1] == self.steptuple[-1]: - print( - "A serious error has occured, and the total counts\ - have overflowed." - ) - print( - "Currentstep[-1] is", - self.currentstep[-1], - "and\ - steptuple[-1] is", - self.steptuple[-1], - ) - exit() - if self.currentstep[i] == self.steptuple[i]: - self.currentstep[i] = 0 - self.currentstep[i + 1] += 1 - - def get_next_mfile(self, currentstep): - """ - Returns the next MFile in the sequence of mfiles specified by the - config file. - - Accepts as input the tuple which describes the number of steps for - each variable. 
- Iterates the current step up by one, checks to make sure that the - current step - actually describes a coordinate. - - Arguments: - - currentstep----> Represents the current step the Mfile walker is on - - Returns: - mfile----------> MFile class of the new MFILE to trawl for data - - """ - - stepstring = "M." - for i in range(len(currentstep)): - stepstring += str(currentstep[i]) + "." - stepstring += "DAT" - - mfile = MFile(self.mfiledir + "/" + stepstring) - - return mfile - - def get_mfile_variables(self, mfile): - """ - Searches through the mfile for the name strings in varstofind, and - returns their corresponding values. - - Arguments: - mfile--------->MFile class describing the MFILE to be trawled - - Returns: - varoutput----->List of values corresponding with the variables in - varstofind. - """ - varoutput = [-1] * len(self.output_vars) - - error_status = mfile.data["error_status"].get_scan(-1) - if error_status < 3: - for ind, varname in enumerate(self.output_vars): - varoutput[ind] = mfile.data[varname].get_scan(-1) - # else: - # print('Unsuccessful PROCESS run, MFILE being ignored!') - - return varoutput - - def determine_compression(self): - """ - From the current ndscan conf file, determine a level of compression - based on the order of magnitude of the number of data points. - - A simple program which determines if the number of data pieces will - be greater than 10^4, 10^6, or 10^7. - - Returns: - compressionlevel-----> An integer which will be valued either 4, 6, 8, - or 9. - - """ - - # Default level established first - compressionlevel = 4 - - totalsteps = 1 - dataarray = tuple(self.steptuple) - - for i in dataarray: - totalsteps *= i - - totalpoints = totalsteps * len(self.output_vars) - - # In a rough way, dynamically set the compression based on how much - # data points are going to be - # in the file.. 
- - if totalpoints >= 100000: - compressionlevel = 6 - if totalpoints >= 1000000: - compressionlevel = 8 - if totalpoints >= 10000000: - compressionlevel = 9 - return compressionlevel - - def create_ncfile(self): - """ - Constructs a NetCDF file from the descriptive information contained - in an ndscan.conf file. - - The data pieces it searches for are title, output directory, - description, and author. - """ - - ncfile = Dataset(self.title + ".nc", "w") - ncfile.title = self.title - ncfile.time = self.time - ncfile.description = self.description - ncfile.author = self.author - ncfile.ndim = len(self.axes) - - return ncfile - - def create_dimensions_and_var_slots(self, ncfile): - """ - Creates a dimension for each Axis in the config file, and a spot in - memory for each variable of interest. - - The variable of interest collector lists generated will act as 'trays' - to store the variables of interest - extracted as the MFILEs have their data extracted. They will each be - coupled with the dimensions specified - in the scan. - - Arguments: - - ncfile-----> A netCDF file class which will have dimensions and - variables added to it. - - """ - self.stepstring = "M." - for i in range(len(self.axes)): - - if type(self.axes[i]["steps"]) is list: - axissize = len(self.axes[i]["steps"]) - else: - axissize = int(self.axes[i]["steps"]) - - if self.verbosity > 1: - print( - "Creating dimension:", - self.axes[i]["varname"], - "with size", - axissize, - ) - - ncfile.createDimension(dimname=self.axes[i]["varname"], size=axissize) - - self.currentstep.append(0) - self.stepstring += "0." 
- - for var in self.output_vars: - if self.verbosity > 1: - print("Creating variable", var) - self.variablecollector.append([]) - self.variablepointers.append(0) - - if "ifail" not in self.output_vars: - if self.verbosity > 1: - print("Adding ifail in by default...") - - self.variablecollector.append([]) - self.variablepointers.append(0) - self.output_vars.append("ifail") - - self.stepstring += "DAT" - - def do_first_extraction(self): - """ - The first MFILE extraction features more error checks than the - others for efficiency, and is handled as a seperate function - before the automated extraction begins. - """ - mstring = self.mfiledir + "/" + self.stepstring - - try: - currentmfile = MFile(mstring) - except FileNotFoundError: - print("Error: The first Mfile could not be found.") - print("\t", mstring) - print("Stopped creation of NetCDF file!") - exit() - - # Scan the MFILE for the variables of interest - recentoutput = self.get_mfile_variables(currentmfile) - - # Store output in the collector trays for each variable of interest - for collectnumber in range(len(recentoutput)): - self.variablecollector[collectnumber].append(recentoutput[collectnumber]) - - def start_automated_extraction(self): - """ - Initiates the conversion tool's automated trawl across the MFILEs - for the variables of interest. - Stores the results in variable collector's respective list for each - variable. 
- - Calls: increment_current_step(steptuple) - get_next_mfile - get_mfile_variables - - Modifies: - self.variablecollector - - """ - - if self.verbosity > 1: - print("Steptuple:", self.steptuple) - - totalsteps = 1 - for i in self.steptuple: - totalsteps *= i - - for step in range(totalsteps - 1): - - self.increment_current_step() - currentmfile = self.get_next_mfile(self.currentstep) - recentoutput = self.get_mfile_variables(currentmfile) - - for collectnumber in range(len(recentoutput)): - self.variablecollector[collectnumber].append( - recentoutput[collectnumber] - ) - - def store_variables_from_collector(self, ncfile, compressionlevel=4): - """ - Once the data has been extracted from the MFILEs, stores it in the - NetCDF file. - - Arguments: - - ncfile------------> The NetCDF file to write to - compressionlevel--> The level of compression to be used on the - variables - - Modifies: - self.variablepointers - - """ - steptuple = self.steptuple - elputpets = steptuple[::-1] # Reverses the steptuple because the - # format NCfiles store in is different from Python. - - for i in range(len(self.variablecollector)): - variablearray = array(self.variablecollector[i], order="C") - # IMPORTANT- I use column major ordering because I think it - # makes more sense - # in the context of the process ndscan suite. 
- if len(elputpets) > 1: - variablearray.shape = elputpets - self.variablepointers[i] = ncfile.createVariable( - self.output_vars[i], - "f4", - tuple(self.axestuple), - zlib=True, - complevel=compressionlevel, - least_significant_digit=4, - ) - - self.variablepointers[i][:] = transpose(variablearray[:]) - - scanvarpointers = [] - - for ind, axis in enumerate(self.axes): - scanvarname = "SCAN" + axis["varname"] - scanvarpointers.append(0) - scanvarpointers[ind] = ncfile.createVariable( - scanvarname, "f4", axis["varname"] - ) - if type(axis["steps"]) is list: - scanvarpointers[ind][:] = array(axis["steps"]) - else: - coords = [ - axis["lowerbound"] - + j - * (axis["upperbound"] - axis["lowerbound"]) - / (axis["steps"] - 1) - for j in range(int(axis["steps"])) - ] - scanvarpointers[ind][:] = array(coords) - - def print_ncfile_variables(self, ncfile): - """ - Prints the variables which are currently stored in ncfile. By default, - runs at the conclusion of each conversion. - - Arguments: - ncfile-------> Points to the NetCDF file to have variables printed. 
- """ - if self.verbosity > 0: - print("Results:") - for var in ncfile.variables: - print("-----------------------") - print(var) - print(ncfile.variables[var][:]) diff --git a/utilities/process_io_lib/__init__.py b/utilities/process_io_lib/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/utilities/process_io_lib/a_to_b_config.py b/utilities/process_io_lib/a_to_b_config.py deleted file mode 100755 index 732a9829..00000000 --- a/utilities/process_io_lib/a_to_b_config.py +++ /dev/null @@ -1,102 +0,0 @@ -import os -from os.path import abspath - - -class AToBConfig: - """Class to read in and store configuration parameters for a_to_b - - Parameters: - wdir --> Working directory in which the .DAT files for process runs - will be created - keep_output --> Switch to keep output for intermediate steps - outdir --> Directory where the .DAT files for each step will be copied - after completion - a_filename --> Name of IN.DAT file for A - b_filename --> Name of IN.DAT file for B - path_to_process --> Location of process binary - vary_niter --> Number of vary_iteration_variables to run when a step - can't be completed - nsteps --> Number of steps to travel from A to B. 
Setting to 0 will - mean only the IN.DAT file for A is run - factor --> When vary_iteration_variables is called, variables will - be randomly varied between current_value * factor and - current_value / factor - bound_gap --> If a variable is set as iteration variable in A but not - in B, the targets for the upper and lower bounds of that - variable will be set as B_value * bound_gap and - B_vale / bound_gap - - """ - - wdir = abspath("wdir") - keep_output = True - outdir = abspath("steps") - a = "A.DAT" - b = "B.DAT" - path_to_process = "/home/PROCESS/develop/process.exe" - vary_niter = 20 - nsteps = 10 - factor = 1.2 - bound_gap = 1.001 - - def __init__(self, configfile="a_to_b.conf"): - if not os.path.isfile(configfile): - return - with open(configfile) as myfile: - for line in myfile: - if line[0] == "*" or line.strip() == "": - continue - elif "=" in line: - t_lhs, t_rhs = line.split("=") - lhs = t_lhs.strip().lower() - rhs = t_rhs.strip() - if lhs == "wdir": - self.wdir = abspath(rhs) - continue - elif lhs == "keep_output": - if rhs.lower() == "true": - self.keep_output = True - continue - elif rhs.lower() == "false": - self.keep_output = False - continue - elif lhs == "outdir": - self.outdir = abspath(rhs) - continue - elif lhs == "a_filename": - self.a = abspath(rhs) - continue - elif lhs == "b_filename": - self.b = abspath(rhs) - continue - elif lhs == "path_to_process": - self.path_to_process = abspath(rhs) - continue - elif lhs == "vary_niter": - if int(rhs) < 0: - print("vary_niter must be positive") - exit() - self.vary_niter = int(rhs) - continue - elif lhs == "nsteps": - if int(rhs) < 0: - print("nsteps must be positive") - exit() - self.nsteps = int(rhs) - continue - elif lhs == "factor": - if float(rhs) < 0: - print("factor must be positive") - exit() - self.factor = float(rhs) - continue - elif lhs == "bound_gap": - if float(rhs) < 0: - print("bound_gap must be positive") - exit() - self.bound_gap = float(rhs) - continue - - print("Unrecognised 
def plot_normalised_ixc(mfilename="MFILE.DAT"):
    """Plot the normalised values of the iteration variables of an MFILE.

    :param mfilename: path of the MFILE to read (default "MFILE.DAT")
    """
    mfile = MFile(mfilename)

    # One "nitvarNNN" entry per iteration variable, 1-based, zero-padded.
    n_itvars = int(mfile.data["nvar"].get_scan(-1))
    keys = ["nitvar{:03}".format(idx) for idx in range(1, n_itvars + 1)]

    values = [mfile.data[key].get_scan(-1) for key in keys]
    labels = [
        mfile.data[key].var_description.replace("_(range_normalised)", "")
        for key in keys
    ]

    figure()
    subplots_adjust(bottom=0.2)
    plot(values, "ks")
    xticks(range(len(values)), labels, rotation="vertical")
    grid(True)


def plot_normalised_icc_res(mfilename="MFILE.DAT"):
    """Plot the normalised constraint residuals of an MFILE.

    :param mfilename: path of the MFILE to read (default "MFILE.DAT")
    """
    mfile = MFile(mfilename)

    # One "normresNNN" entry per constraint (equalities + inequalities).
    n_constraints = int(mfile.data["neqns+nineqns"].get_scan(-1))
    keys = ["normres{:03}".format(idx) for idx in range(1, n_constraints + 1)]

    residuals = [mfile.data[key].get_scan(-1) for key in keys]
    labels = [
        mfile.data[key].var_description.replace("_normalised_residue", "")
        for key in keys
    ]

    figure()
    subplots_adjust(bottom=0.55)
    plot(residuals, "ks")
    xticks(range(len(residuals)), labels, rotation="vertical")
    grid(True)
class RuleList(object):
    """Creates a list of rule instances and validates them."""

    def __init__(self, input_file_path):
        """Fetch the rules and the input data.

        :param input_file_path: Path to the input file from the root project dir
        :type input_file_path: str
        """
        self.rules = []
        self.get_rules()
        self.data = in_dat.StructuredInputData(filename=input_file_path)

    def get_rules(self):
        """Create the rule instances in a list and store them.

        Inspects the input_validator_rules module and filters its classes to
        give only the rule classes (subclasses of Rule). Then creates an
        instance of each rule in a list.
        """

        def is_rule_class(a_class):
            """Check class is a rule (direct subclass of Rule).

            :param a_class: Any class
            :return: True if Rule subclass, False if not
            :rtype: bool
            """
            # Only direct subclasses count: Rule itself (and unrelated
            # classes) are excluded.
            return input_validator_rules.Rule in a_class.__bases__

        # Fetch all classes from input_validator_rules as (name, class) tuples
        class_tuples = inspect.getmembers(input_validator_rules, inspect.isclass)

        rule_classes = [cls for _, cls in class_tuples if is_rule_class(cls)]

        # Now create an instance of each rule class in a list
        self.rules = [rule_class() for rule_class in rule_classes]

    def report_coverage(self):
        """Report roughly how many input data variables are covered by rules."""
        var_names = []
        # Pattern for finding parseable variable names in the Fortran source
        var_name_regex = r"\s*case\s*\(['|\"](\w+)"
        input_reader_filename = FORTRAN_SOURCE_ROOTDIR + "/input.f90"

        # Create list of variables that can be parsed
        with open(input_reader_filename, "r") as input_reader:
            for line in input_reader.readlines():
                match = re.match(var_name_regex, line)
                if match:
                    var_names.append(match.group(1))

        # A variable is only "covered" by a rule if the rule shares the
        # variable's name
        rule_names = [rule.name for rule in self.rules]
        missing_rules = [
            var_name for var_name in var_names if var_name not in rule_names
        ]

        var_names_total = len(var_names)
        rules_total = var_names_total - len(missing_rules)
        if var_names_total == 0:
            # Previously a ZeroDivisionError when input.f90 yields no matches
            print(
                "Rule coverage: no parseable input variables found in "
                f"{input_reader_filename}"
            )
            return
        coverage = (rules_total / var_names_total) * 100
        coverage = "{0:.2f}".format(coverage)
        print(
            f"Rule coverage: {rules_total} variable rules / "
            f"{var_names_total} possible input variables ({coverage}%)"
        )

    def filter_rules(self, filter):
        """Use a filter string to select certain rules by tag.

        NOTE: parameter name shadows the builtin `filter`; kept for
        backward compatibility with keyword callers.

        :param filter: String to match with rule tags
        :type filter: str
        """
        # Todo: implement
        pass

    def validate_data(self):
        """For all rules, set the input data, then check it.

        This runs the check method of each rule instance.
        """
        for rule in self.rules:
            rule.set_data(self.data)
            rule.check()

    def print_results(self):
        """Print the results stored on all the rule objects."""
        for rule in self.rules:
            # Rule name
            output = f"Rule {rule.name}: "

            # Pass status
            if rule.passed:
                output += "passed"
            else:
                output += "failed"

            # Fail reasons, if any
            if len(rule.messages) > 0:
                output += ":"
                for message in rule.messages:
                    output += " " + message

            print(output)


def validate(input_file_path):
    """Create the rule list and validate each rule against the input data.

    :param input_file_path: Path to the input file from the root project dir
    :type input_file_path: str
    """
    rule_list = RuleList(input_file_path)
    rule_list.report_coverage()
    rule_list.validate_data()
    rule_list.print_results()
class Rule(ABC):
    """Abstract rule class used by individual rule subclasses.

    Each rule to check on the input file data is a subclass of this. This is
    an abstract base class, so only subclasses of this can be instantiated.
    """

    @abstractmethod
    def __init__(self, tags):
        """Initialise a Rule object.

        This is an abstract method: it must be overridden in subclasses before
        they can be instantiated.

        :param tags: Tags for describing the rule; used for filtering rules
            (list of strings)
        :type tags: list
        """
        # Rule name is the class name lowercased, matching the input-file
        # variable the rule covers
        self.name = self.__class__.__name__.lower()
        self.tags = tags
        # Result of the rule; set to False by check helpers on failure
        self.passed = True
        # Reason(s) for check failure
        self.messages = []
        # Input data the rule runs on; supplied later via set_data
        self.data = None

    def set_data(self, data):
        """Save the input data onto the Rule object for the check method.

        :param data: The input data object
        :type data: object
        """
        self.data = data

    @abstractmethod
    def check(self):
        """Check the rule, and set the passed attribute.

        This is an abstract method: it must be overridden in subclasses before
        they can be instantiated.
        """
        pass

    def check_defined(self, var_name):
        """Check that a parameter is defined; record a failure if not.

        :param var_name: The name of the parameter
        :type var_name: str
        """
        defined = self.data.is_param_defined(var_name)

        if not defined:
            message = f"{var_name} should be defined."
            self.messages.append(message)
            self.passed = False

    def check_undefined(self, var_name):
        """Check that a parameter is undefined; record a failure if not.

        :param var_name: The name of the parameter
        :type var_name: str
        """
        defined = self.data.is_param_defined(var_name)

        if defined:
            message = f"{var_name} shouldn't be defined."
            self.messages.append(message)
            self.passed = False

    def get_param_value(self, var_name):
        """Get the value of a parameter in the input data.

        :param var_name: Name of the parameter
        :type var_name: str
        :return: Value of the parameter
        :rtype: int, float
        """
        return self.data.get_param_value(var_name)


# Rule classes for checking individual rules
class Ishape(Rule):
    """Rule subclass for checking the value of ishape and its dependencies.

    ishape: switch for plasma cross-sectional shape calculation.
    """

    def __init__(self):
        """Call Rule's __init__ method with tags specific to Ishape"""
        super().__init__(["ishape", "kappa", "triang", "kappa95", "triang95"])

    def check(self):
        """The rule function for Ishape to check on the input data"""
        # ishape must be defined
        self.check_defined("ishape")
        ishape = self.get_param_value("ishape")

        if ishape == 0:
            # Use kappa and triang to calculate 95% kappa and triang
            self.check_defined("kappa")
            self.check_defined("triang")
            self.check_undefined("kappa95")
            self.check_undefined("triang95")
        elif ishape == 1:
            # Scale with aspect ratio
            self.check_undefined("qlim")
            self.check_undefined("kappa")
            self.check_undefined("triang")
            self.check_undefined("kappa95")
            self.check_undefined("triang95")
        elif ishape == 2:
            # kappa calculated using fkzohm, triang input
            self.check_defined("fkzohm")
            self.check_defined("aspect")
            self.check_undefined("kappa")
            self.check_defined("triang")
            self.check_undefined("kappa95")
            self.check_undefined("triang95")
        elif ishape == 3:
            # kappa calculated using fkzohm, triang95 input
            self.check_defined("fkzohm")
            self.check_undefined("kappa")
            self.check_defined("triang95")
            self.check_undefined("triang")
            self.check_undefined("kappa95")
        elif ishape == 4:
            # kappa95 and triang95 are used to calculate kappa and triang
            self.check_defined("kappa95")
            self.check_defined("triang95")
            self.check_undefined("kappa")
            self.check_undefined("triang")
        elif ishape == 5:
            # kappa95 and triang95 are used to calculate kappa and triang
            self.check_defined("kappa95")
            self.check_defined("triang95")
            self.check_undefined("kappa")
            self.check_undefined("triang")
        elif ishape == 6:
            # Use kappa and triang to calculate 95% kappa and triang.
            # BUGFIX: this branch was a duplicate "ishape == 5" and therefore
            # unreachable; per the Kappa/Triang rules, ishape 6 takes kappa
            # and triang as inputs.
            self.check_defined("kappa")
            self.check_defined("triang")
            self.check_undefined("kappa95")
            self.check_undefined("triang95")
        elif ishape == 7:
            # kappa95 and triang95 are used to calculate kappa and triang
            self.check_defined("kappa95")
            self.check_defined("triang95")
            self.check_undefined("kappa")
            self.check_undefined("triang")
        elif ishape == 8:
            # Use kappa and triang to calculate 95% kappa and triang
            self.check_defined("kappa")
            self.check_defined("triang")
            self.check_undefined("kappa95")
            self.check_undefined("triang95")


class Aspect(Rule):
    """Aspect ratio"""

    def __init__(self):
        """Set tags specific to Aspect"""
        super().__init__(["aspect"])

    def check(self):
        """Check that aspect exists in input"""
        self.check_defined("aspect")


class Hfact(Rule):
    """Energy confinement time H-factor"""

    def __init__(self):
        """Set tags specific to Hfact"""
        super().__init__(["hfact"])

    def check(self):
        """Check hfact is defined"""
        self.check_defined("hfact")


class Kappa(Rule):
    """Plasma elongation"""

    def __init__(self):
        """Set tags specific to Kappa"""
        super().__init__(["kappa", "ishape"])

    def check(self):
        """Should kappa be defined, based on value of ishape"""
        # This logic is covered in the Ishape rule
        ishape = self.get_param_value("ishape")
        # BUGFIX: was "ishape is [0, 6, 8]", which compares identity against
        # a fresh list and is always False; membership test intended.
        if ishape in [0, 6, 8]:
            # kappa input value is used
            self.check_defined("kappa")
        else:
            # kappa is calculated
            self.check_undefined("kappa")


class Triang(Rule):
    """Plasma triangularity"""

    def __init__(self):
        """Set tags specific to Triang"""
        super().__init__(["triang", "ishape"])

    def check(self):
        """Should triang be defined, based on value of ishape"""
        # This logic is covered in the Ishape rule
        ishape = self.get_param_value("ishape")
        if ishape in [0, 2, 6, 8]:
            self.check_defined("triang")
        else:
            self.check_undefined("triang")


class Alphan(Rule):
    """Density profile index"""

    def __init__(self):
        """Set tags specific to Alphan"""
        super().__init__(["alphan"])

    def check(self):
        """Check alphan is defined"""
        self.check_defined("alphan")


class Alphat(Rule):
    """Temperature profile index"""

    def __init__(self):
        """Set tags specific to Alphat"""
        super().__init__(["alphat"])

    def check(self):
        """Check alphat is defined"""
        self.check_defined("alphat")


class Dnbeta(Rule):
    """(Troyon-like) coefficient for beta scaling"""

    def __init__(self):
        """Set tags specific to Dnbeta"""
        super().__init__(["dnbeta", "gtscale"])

    def check(self):
        """Check if dnbeta input value is required or not"""
        iprofile = self.get_param_value("iprofile")
        gtscale = self.get_param_value("gtscale")
        if iprofile == 0:
            if gtscale == 1:
                # dnbeta is calculated
                self.check_undefined("dnbeta")
            else:
                # dnbeta is required
                self.check_defined("dnbeta")
        else:
            # dnbeta is calculated
            self.check_undefined("dnbeta")
class ObsoleteVarChecker(Rule):
    """Checks for obsolete or renamed vars, and suggests new names."""

    def __init__(self):
        """Set tags specific to ObsoleteVarChecker"""
        super().__init__(["obsolete"])

    def check(self):
        """Look for obsolete variable names and suggest alternatives."""
        # List of unrecognised vars in the input file
        unrecog_vars = self.data.unrecognised_vars

        # If there are obsolete vars in the input file, fail the rule, warn
        # and suggest alternatives, if available
        for unrecog_var in unrecog_vars:
            # OBS_VARS maps obsolete name -> replacement name, or None when
            # the variable was removed with no replacement. The False default
            # distinguishes "not obsolete at all" from "removed" (None).
            recog_var = OBS_VARS.get(unrecog_var, False)

            if recog_var:
                # Obsolete var name with new var name suggestion found
                message = f'"{unrecog_var}" is obsolete; try "{recog_var}"' " instead."
            elif recog_var is None:
                # Obsolete var name, no new var name suggestion
                message = f"{unrecog_var} is deprecated."
            else:
                # Var name is unrecognised, but not in obsolete list either.
                # BUGFIX: this was `break`, which silently skipped all
                # remaining unrecognised vars after the first unknown one.
                continue

            # Fail rule with message
            self.passed = False
            self.messages.append(message)


class DuplicateChecker(Rule):
    """Ensures there are no duplicate variable initialisations."""

    def __init__(self):
        """Set tags specific to DuplicateChecker"""
        super().__init__(["duplicates"])

    def check(self):
        """Find any duplicate variables in the input data."""
        duplicates = self.data.duplicates
        if duplicates:
            # Got some duplicates
            self.passed = False
            duplicates_no = len(duplicates)
            message = f"Found {duplicates_no} duplicates: {duplicates}"
            self.messages.append(message)
# Input file to read (.DAT) and output to write (.yml)
# If filename is provided, look in current working directory
# Absolute and relative file paths also work
INPUT_FILE_PATH = "IN.DAT"
OUTPUT_FILE_PATH = "input.yml"

# Create InDat object by reading in .DAT input file
input_data = in_dat.StructuredInputData(filename=INPUT_FILE_PATH)

# Dump YAML into the output file. The `with` block guarantees the handle is
# closed even if yaml.dump raises (previously a bare open()/close() pair).
try:
    with open(OUTPUT_FILE_PATH, "w") as output_file:
        yaml.dump(input_data, output_file)
except Exception as error:
    # Report the cause instead of swallowing it silently
    print(f"Failed to write YAML input file: {error}")
else:
    print("Successfully converted input file to YAML, saved as " f"{OUTPUT_FILE_PATH}")
"""Visualisation class containing Netcdf files from ndscan tool""" - - def __init__(self, ncfilename="Demonstrationdata.nc"): - """ - Opens the ncfile at ncfilename and initializes its' variables - into memory. - - """ - try: - self.ncfile = Dataset(ncfilename, "r") - except RuntimeError: - print("Error: NetCDF file does not exist, specify with -f!") - exit() - scanvars = self.ncfile.scanvars - self.scanvars = scanvars.split(",") - self.currentslice = None - self.stepslist = [] - - def make_contour_plot(self, xdim, ydim, zvar, masterconfigs): - """ - Generates a contour plot using matplotlib and the arguments given. - - Arguments: - xdim-------------> String representing the X scan dimension - ydim-------------> String representing the Y scan dimension - zvar-------------> String which represents the Z variable to plot. - masterconfigs----> A configuration dict with some details. - - - NOTE! XVAR AND Y VAR SHOULD BE A STRING REPRESENTING SCANNING VARIABLES, - ZVAR SHOULD BE A STRING REPRESENTING A VARIABLE OF INTEREST. - """ - - print("xdim is", xdim, "ydim is", ydim) - if xdim[0:4] != "SCAN": - xdim = "SCAN" + xdim - if ydim[0:4] != "SCAN": - ydim = "SCAN" + ydim - xcoords = self.ncfile.variables[xdim][:] - ycoords = self.ncfile.variables[ydim][:] - xarr, yarr = meshgrid(xcoords, ycoords) - if self.currentslice is not None: - zarr = self.ncfile.variables[zvar][self.currentslice] - else: - zarr = self.ncfile.variables[zvar][:] - zarr = zarr.transpose() - - conp = pl.contour(xarr, yarr, zarr, 10) - pl.xlabel(masterconfigs["xname"]) - pl.ylabel(masterconfigs["yname"]) - pl.title(masterconfigs["zname"]) - pl.clabel(conp, inline=1, fontsize=10) - pl.show() - - def make_scatter_plot(self, xdim, ydim, zvar, masterconfigs): - """ - Generates a contour plot using matplotlib and the arguments given. 
- - Arguments: - xdim-------------> String representing the X scan dimension - ydim-------------> String representing the Y scan dimension - zvar-------------> String which represents the Z variable to plot. - masterconfigs----> A configuration dicts which is not implemented. - - NOTE! XVAR AND Y VAR SHOULD BE A NUMBER REPRESENTING SCANNING VARIABLES, - ZVAR SHOULD BE A NUMBER REPRESENTING A VARIABLE OF INTEREST. - .""" - - print("xdim is", xdim, "ydim is", ydim) - if xdim[0:4] != "SCAN": - xdim = "SCAN" + xdim - if ydim[0:4] != "SCAN": - ydim = "SCAN" + ydim - xcoords = self.ncfile.variables[xdim] - ycoords = self.ncfile.variables[ydim] - xarr, yarr = meshgrid(xcoords, ycoords) - if self.currentslice is not None: - zarr = self.ncfile.variables[zvar][self.currentslice] - else: - zarr = self.ncfile.variables[zvar][:] - - zarr = zarr.transpose() - - pl.scatter(xarr, yarr, c=zarr, marker="s") - pl.xlabel(masterconfigs["xname"]) - pl.ylabel(masterconfigs["yname"]) - pl.title(masterconfigs["zname"]) - pl.colorbar() - pl.show() - - def slicescanner(self, xvarnum, yvarnum, zvar): - - """ - Figures out what the 2 dimensional slices of a >=3 dimensional space - looks like, then asks the user - to pick one to use for later 2d plots. - - How it works: A dimension tuple looks like - (x,y,z,p,q) === (0,1,2,3,4) - From there we identify which two tuples we are scanning along, say x - and p or 0 and 3 - Then we iterate through them piece by piece so extract the variable - from the ncscan designated by zvar, - then do zvar[x,0,0,p,0] from netcdf, then that's a 2d array! - and from there do analytics, store the results, and grab the next 2d - array. - And so on. 
    def slicescanner(self, xvarnum, yvarnum, zvar):

        """Enumerate every 2-D slice of a >=3-D scan and ask the user to pick one.

        A dimension tuple looks like (x, y, z, p, q) === (0, 1, 2, 3, 4).
        The two dimensions being plotted (xvarnum, yvarnum) keep their full
        index range; every other dimension is pinned to a single index, and
        each combination of pinned indices is one candidate 2-D slice. For
        each slice the zvar values are extracted from the NetCDF file and
        summary statistics (max/min/sum/std) are printed, then the user
        chooses a slice by number.

        Arguments:
        xvarnum --> integer index of the X scan dimension
        yvarnum --> integer index of the Y scan dimension
        zvar ----> name of the NetCDF variable of interest

        Modifies:
        stepslist (also stored on self), self.currentslice

        Returns:
        The chosen slice as a mixed list of ints (pinned indices) and lists
        (the free X/Y index ranges).
        """
        print("Activating slice scanner for xvarnum", xvarnum, "and yvarnum", yvarnum)
        superlatives = {
            "ZenithSlice": None,
            "NadirSlice": None,
            "MaximalSlice": None,
            "MinimalSlice": None,
            "SteepestSlice": None,
            "FlattestSlice": None,
        }
        sliceindexlist = []
        maxlist = []
        minlist = []
        sumlist = []
        stdlist = []

        steptuple = tuple(self.ncfile.scansteps)
        stepslist = []

        # Plotted dimensions keep their full index range (as a list); all
        # other dimensions start pinned at index 0.
        for i in range(len(steptuple)):
            if i != xvarnum and i != yvarnum:
                stepslist.append(0)
            else:
                stepslist.append(list(range(steptuple[i])))
        self.stepslist = stepslist
        # We just reset it so that it's [x][0][0][p][0]...[0] for our
        # dimensions.
        # Now we need this to iterate by incrementing the leftmost dimension
        # until it is equal to its steptuple value -- then we reduce it back
        # to zero and add 1 to the one on the left. Once stepslist's non-list
        # elements are equal to the steptuple, we are done.

        def keep_going(steptuple, stepslist):
            """Advance stepslist to the next slice (odometer-style carry).

            Returns the mutated stepslist, or False when all pinned-index
            combinations have been visited.
            """
            for index in range(len(stepslist)):
                if type(stepslist[index]) is list:
                    # Free (plotted) dimension: never incremented
                    continue
                else:
                    stepslist[index] += 1

                    if stepslist[index] < steptuple[index]:
                        return stepslist

                    if stepslist[index] == steptuple[index] and index != len(stepslist) - 1:
                        # Carry: reset this digit and bump the next pinned one
                        stepslist[index] = 0
                        for countahead in range(len(stepslist))[index + 1 : :]:
                            if type(stepslist[countahead]) is int:
                                stepslist[countahead] += 1

                        # Find the last pinned (int) digit to test exhaustion
                        countback = -1
                        if type(stepslist[-1]) is list:
                            while type(stepslist[countback]) is not int:
                                countback -= 1

                        if stepslist[countback] == steptuple[countback]:
                            print("Done!")
                            return False
                        else:
                            return stepslist
                    else:
                        continue
                # NOTE(review): the original contained an apparently
                # unreachable print("Done!") / return False at this level;
                # every path above either returns or continues.
                print("Done!")

                return False

            print("Done!")
            return False

        # A little hack to contain all of the machinery of analyzing slices
        # within the loop..
        while stepslist:

            sliceindexlist.append(list(stepslist))
            temp = self.ncfile.variables[zvar][stepslist]
            maxlist.append(amax(temp))
            minlist.append(amin(temp))
            sumlist.append(sum(temp))
            stdlist.append(std(temp))
            print(
                "Analytics: Max:",
                amax(temp),
                "Min:",
                amin(temp),
                "Sum:",
                sum(temp),
                "Std:",
                std(temp),
            )
            print(temp)
            stepslist = keep_going(steptuple, stepslist)

        print("Your options:")
        for i in range(len(sliceindexlist)):
            print(i, ":", sliceindexlist[i])

        # "Superlative" slices: extremes of the collected statistics
        superlatives["ZenithSlice"] = sliceindexlist[maxlist.index(max(maxlist))]
        superlatives["NadirSlice"] = sliceindexlist[minlist.index(min(minlist))]

        superlatives["MaximalSlice"] = sliceindexlist[sumlist.index(max(sumlist))]
        superlatives["MinimalSlice"] = sliceindexlist[sumlist.index(min(sumlist))]

        superlatives["SteepestSlice"] = sliceindexlist[stdlist.index(max(stdlist))]
        superlatives["FlattestSlice"] = sliceindexlist[stdlist.index(min(stdlist))]

        for keys in superlatives.keys():
            print(keys, superlatives[keys])

        # No range validation on the user's choice: an out-of-range or
        # non-numeric entry raises (pre-existing behaviour).
        choice = input("Choose a slice by number, please:")
        print()
        self.currentslice = sliceindexlist[int(choice)]
        return sliceindexlist[int(choice)]
    def slice_manager(self):
        """Interactively choose the 2-D slice used for later 2-D plots.

        Prompts for the two plotted scan dimensions and the variable of
        interest, then delegates to slicescanner.

        Returns:
        The slice description list produced by slicescanner: integers for
        pinned cross-section indices, lists for the free dimensions.
        """
        print(
            "You have chosen to make a 2d plot in ",
            len(self.ncfile.dimensions),
            " space.",
        )
        print(
            "You must summarily choose a 2d slice for the purposes of 2d\
                plotting."
        )
        print("Your scan dimensions are:")
        print(self.scanvars)
        print("Choose which two are your dimensions of choice.")
        xaxis = input("X:")
        print()
        yaxis = input("Y:")
        print()

        # NOTE(review): validation is against ncfile.dimensions while the
        # prompt lists self.scanvars (un-prefixed names) — confirm these two
        # name sets actually match for ndscan output files.
        while (
            xaxis not in self.ncfile.dimensions or yaxis not in self.ncfile.dimensions
        ):
            print("Incorrect x and y axes: Please try again.")
            print(self.scanvars)
            print("Choose which two are your dimensions of choice.")
            xaxis = input("X:")
            print()

            yaxis = input("Y:")
            print()

        xvarnum = int(self.scanvars.index(xaxis))
        yvarnum = int(self.scanvars.index(yaxis))

        print("What is the third variable that you are interested in?")
        for var in self.ncfile.variables:
            print(var)

        # NOTE(review): zaxis is not validated against ncfile.variables here,
        # unlike the x/y axes above.
        zaxis = input("Z:")
        print()

        print("Generating information about slices...")

        return self.slicescanner(xvarnum, yvarnum, zaxis)

    def main(self):
        """Main menu loop from which the other functions are called.

        Calls:
        slice_manager
        make_scatter_plot
        make_contour_plot

        Uses:
        Dataset module, self.currentslice
        """

        print("``'-.,_,.-'``'-.,_,.='``'-.,_,.-'``'-.,_,.='``")
        print("``'-.,_,.-'`VISUALIZATION UTILITY`'-.,_,.='``")
        print("``'-.,_,.-'``'-.,_,.='``'-.,_,.-'``'-.,_,.='``")

        # Axis labels for the plotting routines, filled in by the menu below
        masterconfigs = {"xname": "", "yname": "", "zname": ""}

        while 1 == 1:

            print("Hello, welcome to the main menu!!")
            print("1) Choose a plotting routine.")
            print("2) Use the slice manager (for n>2 dimensional scans)")
            print("3) Exit.")
            userinput = input(
                "What would you like to do? Please enter a\
 number."
            )
            print()

            if userinput == "1":
                # Warn (but do not block) if a >2-D scan has no slice chosen
                if len(self.ncfile.dimensions) > 2 and self.currentslice is None:
                    print("Warning-- You have not gone to the slice manager yet.")
                    print(
                        "Suggest that you run the slice manager first then\
 return to 2d plots"
                    )
                print("You have the following options.")
                print("2-dimensional scan, or higher dimensions?")
                print("1) 2d.")
                print("2) 3d or more.")
                scaninput = input("Pick one:")
                print()

                if scaninput == "1":
                    print("Choose your x and y axes:")
                    print(self.scanvars)
                    xaxis = input("x:")
                    print()
                    while xaxis not in self.scanvars:
                        print("Sorry- invalid x axis! Please try again...")
                        xaxis = input("x:")
                        print()
                    masterconfigs["xname"] = xaxis
                    yaxis = input("y:")
                    print()
                    while yaxis not in self.scanvars:
                        print("Sorry- invalid y axis! Please try again...")
                        yaxis = input("y:")
                        print()
                    masterconfigs["yname"] = yaxis
                    print("Please choose a Z:")
                    # Only non-scan variables are sensible Z choices
                    for varbs in self.ncfile.variables:
                        if varbs[0:4] != "SCAN":
                            print(varbs)
                    zaxis = input("z:")
                    print()
                    while zaxis not in self.ncfile.variables:
                        print("Sorry- invalid z axis! Please try again...")
                        zaxis = input("z:")
                        print()
                    masterconfigs["zname"] = zaxis
                    print("And what plotting routine would you like?")
                    print("1) Contour")
                    print("2) Scatter w/ Z Coloring")

                    plotinput = input("Please choose!:")
                    print()
                    if plotinput == "1":
                        self.make_contour_plot(xaxis, yaxis, zaxis, masterconfigs)

                    elif plotinput == "2":
                        self.make_scatter_plot(xaxis, yaxis, zaxis, masterconfigs)

                elif scaninput == "2":
                    print("Okay- activating slice manager!")
                    self.currentslice = self.slice_manager()
                    # TODO: I don't think this actually leads to any plots!
            elif userinput == "2":
                print("Okay- activating slice manager!")
                self.currentslice = self.slice_manager()
            else:
                # Any other input (including the advertised "3") exits
                print("Exiting.")
                break
- elif userinput == "2": - print("Okay- activating slice manager!") - self.currentslice = self.slice_manager() - else: - print("Exiting.") - break diff --git a/utilities/process_io_lib/plot_proc_func.py b/utilities/process_io_lib/plot_proc_func.py deleted file mode 100755 index dd8f0f54..00000000 --- a/utilities/process_io_lib/plot_proc_func.py +++ /dev/null @@ -1,1196 +0,0 @@ -""" - - Functions for creating one-page summary - - James Morris - 26/03/2014 - - CCFE - - Compatible with PROCESS version ??? - -""" - -import math -import scipy as sp -import numpy as np - -try: - import process_io_lib.process_dicts as proc_dict -except ImportError: - print( - "The Python dictionaries have not yet been created. Please run \ -'make dicts'!" - ) - exit() - -RADIAL_BUILD = [ - "bore", - "ohcth", - "gapoh", - "tfcth", - "gapds", - "d_vv_in", - "shldith", - "blnkith", - "fwith", - "scrapli", - "rminori", - "rminoro", - "scraplo", - "fwoth", - "blnkoth", - "shldoth", - "d_vv_out", - "gapsto", - "tfthko", -] - -VERTICAL_BUILD = [ - "tfcth", - "vgap2", - "d_vv_top", - "shldtth", - "blnktth", - "top first wall vertical thickness (m)", - "top scrape-off vertical thickness (m)", - "rminor*kappa", - "midplane", - "rminor*kappa", - "vgap", - "divfix", - "shldtth", - "d_vv_top", - "vgap2", - "tfcth", -] - - -FILLCOLS = ("0.8", "0", "#00ff00", "#00ffff", "#ff0000", "#ff00ff") - -ANIMATION_INFO = [ - ("rmajor", "Major radius", "m"), - ("rminor", "Minor radius", "m"), - ("aspect", "Aspect ratio", ""), -] -# ("", "", ""), -# ("", "", "")] - - -def plotdh(axis, r0, a, delta, kap): - """Plots half a thin D-section. 
- - Arguments: - axis --> axis object to plot to - r0 --> major radius - a --> minor radius - delta --> triangularity - kap --> elongation - - Returns: - rs --> radial coordinates of D-section - zs --> vertical coordinates of D-section - - """ - angs = np.linspace(0, np.pi, 50, endpoint=True) - rs = r0 + a * np.cos(angs + delta * np.sin(1.0 * angs)) - zs = kap * a * np.sin(angs) - axis.plot(rs, zs, color="black") - return rs, zs - - -def plotdhgap(axis, inpt, outpt, inthk, outthk, toppt, topthk, delta, col): - """Plots half a thick D-section with a gap. - - Arguments: - axis --> axis object to plot to - inpt --> inner points - outpt --> outer points - inthk --> inner thickness - outthk --> outer thickness - toppt --> top points - topthk --> top thickness - delta --> triangularity - col --> color for fill - - """ - arc = np.pi / 4.0 - r01 = (inpt + outpt) / 2.0 - r02 = (inpt + inthk + outpt - outthk) / 2.0 - a1 = r01 - inpt - a2 = r02 - inpt - inthk - kap1 = toppt / a1 - kap2 = (toppt - topthk) / a2 - # angs = ((np.pi/2.) - arc/2.) * findgen(50)/49. - angs = np.linspace(0.0, (np.pi / 2.0) - arc / 2.0, 50, endpoint=True) - rs1 = r01 + a1 * np.cos(angs + delta * np.sin(angs)) - zs1 = kap1 * a1 * np.sin(angs) - rs2 = r02 + a2 * np.cos(angs + delta * np.sin(angs)) - zs2 = kap2 * a2 * np.sin(angs) - # angs = !pi + ((!pi/2.) - arc) * findgen(50)/49. 
- angs = np.linspace(np.pi, np.pi + ((np.pi / 2.0) - arc), 50, endpoint=True) - rs3 = r01 + a1 * np.cos(angs + delta * np.sin(angs)) - zs3 = kap1 * a1 * np.sin(angs) - rs4 = r02 + a2 * np.cos(angs + delta * np.sin(angs)) - zs4 = kap2 * a2 * np.sin(angs) - - axis.plot( - np.concatenate([rs1, rs2[::-1]]), - np.concatenate([zs1, zs2[::-1]]), - color="black", - ) - axis.plot( - np.concatenate([rs3, rs4[::-1]]), - -np.concatenate([zs3, zs4[::-1]]), - color="black", - ) - axis.fill( - np.concatenate([rs1, rs2[::-1]]), np.concatenate([zs1, zs2[::-1]]), color=col - ) - axis.fill( - np.concatenate([rs3, rs4[::-1]]), -np.concatenate([zs3, zs4[::-1]]), color=col - ) - - -def plot_plasma(axis, mfile_data, scan): - """Plots the plasma boundary arcs. - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE data object - scan --> scan number to use - - """ - - r0 = mfile_data.data["rmajor"].get_scan(scan) - a = mfile_data.data["rminor"].get_scan(scan) - delta = 1.5 * mfile_data.data["triang95"].get_scan(scan) - kappa = (1.1 * mfile_data.data["kappa95"].get_scan(scan)) + 0.04 - i_single_null = mfile_data.data["i_single_null"].get_scan(scan) - - x1 = (2.0 * r0 * (1.0 + delta) - a * (delta**2 + kappa**2 - 1.0)) / ( - 2.0 * (1.0 + delta) - ) - x2 = (2.0 * r0 * (delta - 1.0) - a * (delta**2 + kappa**2 - 1.0)) / ( - 2.0 * (delta - 1.0) - ) - r1 = 0.5 * math.sqrt( - (a**2 * ((delta + 1.0) ** 2 + kappa**2) ** 2) / ((delta + 1.0) ** 2) - ) - r2 = 0.5 * math.sqrt( - (a**2 * ((delta - 1.0) ** 2 + kappa**2) ** 2) / ((delta - 1.0) ** 2) - ) - theta1 = sp.arcsin((kappa * a) / r1) - theta2 = sp.arcsin((kappa * a) / r2) - inang = 1.0 / r1 - outang = 1.5 / r2 - if i_single_null == 0: - angs1 = np.linspace( - -(inang + theta1) + np.pi, (inang + theta1) + np.pi, 256, endpoint=True - ) - angs2 = np.linspace(-(outang + theta2), (outang + theta2), 256, endpoint=True) - elif i_single_null < 0: - angs1 = np.linspace( - -(inang + theta1) + np.pi, theta1 + np.pi, 256, endpoint=True - ) - 
angs2 = np.linspace(-theta2, (outang + theta2), 256, endpoint=True) - else: - angs1 = np.linspace( - -theta1 + np.pi, (inang + theta1) + np.pi, 256, endpoint=True - ) - angs2 = np.linspace(-(outang + theta2), theta2, 256, endpoint=True) - xs1 = -(r1 * np.cos(angs1) - x1) - ys1 = r1 * np.sin(angs1) - xs2 = -(r2 * np.cos(angs2) - x2) - ys2 = r2 * np.sin(angs2) - axis.plot(xs1, ys1, color="black") - axis.plot(xs2, ys2, color="black") - - -def plot_centre_cross(axis, mfile_data, scan): - """Function to plot centre cross on plot - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE data object - scan --> scan number to use - - """ - rmajor = mfile_data.data["rmajor"].get_scan(scan) - axis.plot( - [rmajor - 0.25, rmajor + 0.25, rmajor, rmajor, rmajor], - [0, 0, 0, 0.25, -0.25], - color="black", - ) - - -def cumulative_radial_build(section, mfile_data, scan): - """Function for calculating the cumulative radial build up to and - including the given section. - - Arguments: - section --> section of the radial build to go up to - mfile_data --> MFILE data object - scan --> scan number to use - - Returns: - cumulative_build --> cumulative radial build up to section given - - """ - - cumulative_build = 0 - for item in RADIAL_BUILD: - if item == "rminori" or item == "rminoro": - cumulative_build += mfile_data.data["rminor"].get_scan(scan) - elif "d_vv_" in item: - cumulative_build += mfile_data.data["d_vv_in"].get_scan(scan) - else: - cumulative_build += mfile_data.data[item].get_scan(scan) - if item == section: - break - return cumulative_build - - -def cumulative_vertical_build(mfile_data, scan): - """Function for calculating the cumulative vertical build - - Arguments: - mfile_data --> MFILE data object - scan --> scan number to use - - Returns: - cumulative_build --> vertical build list - - """ - - cumulative_build = list() - cumulative_build.append(0.0) - - # Top - top_build = 0 - start = False - for i in range(len(VERTICAL_BUILD) - 1, -1, -1): - if not 
start: - if VERTICAL_BUILD[i] == "midplane": - start = True - else: - top_build += mfile_data.data[VERTICAL_BUILD[i]].get_scan(scan) - cumulative_build.insert(0, top_build) - - # bottom - start = False - bottom_build = 0 - for j in range(0, len(VERTICAL_BUILD), 1): - if not start: - if VERTICAL_BUILD[j] == "midplane": - start = True - else: - bottom_build -= mfile_data.data[VERTICAL_BUILD[j]].get_scan(scan) - cumulative_build.append(bottom_build) - - return cumulative_build - - -def plot_machine_pic(axis, mfile_data, scan=-1): - """Function to plot machine picture - - Arguments: - axis --> axis object to add plot to - mfile_data --> MFILE data object - scan --> scan number to use - - """ - - xmin = 0 - xmax = 20 - ymin = -15 - ymax = 15 - axis.set_ylim([ymin, ymax]) - axis.set_xlim([xmin, xmax]) - axis.set_autoscaley_on(False) - axis.set_autoscalex_on(False) - axis.set_xlabel("R / m") - axis.set_ylabel("Z / m") - axis.set_title("Radial build") - - if mfile_data.data["i_single_null"].get_scan(scan): - plot_i_single_null_cryo(axis, mfile_data, scan) - plot_shield_i_single_null(axis, mfile_data, scan) - - plot_plasma(axis, mfile_data, scan) - plot_centre_cross(axis, mfile_data, scan) - - -def plot_i_single_null_cryo(axis, mfile_data, scan): - """Function to plot top of cryostat - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE data object - scan --> scan number to use - - """ - triang = mfile_data.data["triang95"].get_scan(scan) - - # cryostat - temp_array_1 = () - temp_array_2 = () - # Outer side - radx = ( - cumulative_radial_build("ddwo", mfile_data, scan) - + cumulative_radial_build("gapds", mfile_data, scan) - ) / 2.0 - rminx = ( - cumulative_radial_build("ddwo", mfile_data, scan) - - cumulative_radial_build("gapds", mfile_data, scan) - ) / 2.0 - - a = cumulative_vertical_build(mfile_data, scan) - kapx = a[2] / rminx - (rs, zs) = plotdh(axis, radx, rminx, triang, kapx) - temp_array_1 = temp_array_1 + ((rs, zs)) - - kapx = a[13] / rminx - 
(rs, zs) = plotdh(axis, radx, rminx, triang, kapx) - temp_array_2 = temp_array_2 + ((rs, zs)) - - # Inner side - radx = ( - cumulative_radial_build("shldoth", mfile_data, scan) - + cumulative_radial_build("d_vv_out", mfile_data, scan) - ) / 2.0 - rminx = ( - cumulative_radial_build("shldoth", mfile_data, scan) - - cumulative_radial_build("d_vv_out", mfile_data, scan) - ) / 2.0 - - a = cumulative_vertical_build(mfile_data, scan) - kapx = a[3] / rminx - (rs, zs) = plotdh(axis, radx, rminx, triang, kapx) - temp_array_1 = temp_array_1 + ((rs, zs)) - kapx = a[12] / rminx - (rs, zs) = plotdh(axis, radx, rminx, triang, kapx) - temp_array_2 = temp_array_2 + ((rs, zs)) - - rs = np.concatenate([temp_array_1[0], temp_array_1[2][::-1]]) - zs = np.concatenate([temp_array_1[1], temp_array_1[3][::-1]]) - axis.fill(rs, zs, color=FILLCOLS[1]) - - rs = np.concatenate([temp_array_2[0], temp_array_2[2][::-1]]) - zs = np.concatenate([temp_array_2[1], temp_array_2[3][::-1]]) - axis.fill(rs, zs, color=FILLCOLS[1]) - - -def plot_shield_i_single_null(axis, mfile_data, scan): - """Function to plot single null case of shield - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE.DAT object - scan --> scan number to use - - """ - triang = mfile_data.data["triang95"].get_scan(scan) - blnkith = mfile_data.data["blnkith"].get_scan(scan) - blnkoth = mfile_data.data["blnkoth"].get_scan(scan) - fwith = mfile_data.data["fwith"].get_scan(scan) - fwoth = mfile_data.data["fwoth"].get_scan(scan) - blnktth = mfile_data.data["blnktth"].get_scan(scan) - tfwvt = mfile_data.data["top first wall vertical thickness (m)"].get_scan(scan) - c_shldith = cumulative_radial_build("shldith", mfile_data, scan) - c_blnkoth = cumulative_radial_build("blnkoth", mfile_data, scan) - c_blnkith = cumulative_radial_build("blnkith", mfile_data, scan) - c_fwoth = cumulative_radial_build("fwoth", mfile_data, scan) - a = cumulative_vertical_build(mfile_data, scan) - - temp_array_1 = () - temp_array_2 = () - - radx 
= ( - cumulative_radial_build("shldoth", mfile_data, scan) - + cumulative_radial_build("d_vv_out", mfile_data, scan) - ) / 2.0 - rminx = ( - cumulative_radial_build("shldoth", mfile_data, scan) - - cumulative_radial_build("d_vv_out", mfile_data, scan) - ) / 2.0 - kapx = a[3] / rminx - (rs, zs) = plotdh(axis, radx, rminx, triang, kapx) - temp_array_1 = temp_array_1 + ((rs, zs)) - kapx = a[12] / rminx - (rs, zs) = plotdh(axis, radx, rminx, triang, kapx) - temp_array_2 = temp_array_2 + ((rs, zs)) - - radx = ( - cumulative_radial_build("blnkoth", mfile_data, scan) - + cumulative_radial_build("shldith", mfile_data, scan) - ) / 2.0 - rminx = ( - cumulative_radial_build("blnkoth", mfile_data, scan) - - cumulative_radial_build("shldith", mfile_data, scan) - ) / 2.0 - kapx = a[4] / rminx - (rs, zs) = plotdh(axis, radx, rminx, triang, kapx) - temp_array_1 = temp_array_1 + ((rs, zs)) - kapx = a[11] / rminx - (rs, zs) = plotdh(axis, radx, rminx, triang, kapx) - temp_array_2 = temp_array_2 + ((rs, zs)) - - radx = ( - cumulative_radial_build("fwoth", mfile_data, scan) - + cumulative_radial_build("blnkith", mfile_data, scan) - ) / 2.0 - rminx = ( - cumulative_radial_build("fwoth", mfile_data, scan) - - cumulative_radial_build("blnkith", mfile_data, scan) - ) / 2.0 - kapx = a[5] / rminx - (rs, zs) = plotdh(axis, radx, rminx, triang, kapx) - temp_array_1 = temp_array_1 + ((rs, zs)) - - radx = ( - cumulative_radial_build("scraplo", mfile_data, scan) - + cumulative_radial_build("fwith", mfile_data, scan) - ) / 2.0 - rminx = ( - cumulative_radial_build("scraplo", mfile_data, scan) - - cumulative_radial_build("fwith", mfile_data, scan) - ) / 2.0 - kapx = a[6] / rminx - (rs, zs) = plotdh(axis, radx, rminx, triang, kapx) - temp_array_1 = temp_array_1 + ((rs, zs)) - - # Top - rs = np.concatenate([temp_array_1[0], temp_array_1[2][::-1]]) - zs = np.concatenate([temp_array_1[1], temp_array_1[3][::-1]]) - axis.fill(rs, zs, color=FILLCOLS[2]) - - # for i in range(len(rs)): - # print(rs[i], 
zs[i]) - - rs = np.concatenate([temp_array_1[0 + 2 * 1], temp_array_1[2 + 2 * 1][::-1]]) - zs = np.concatenate([temp_array_1[1 + 2 * 1], temp_array_1[3 + 2 * 1][::-1]]) - axis.fill(rs, zs, color=FILLCOLS[1 + 2]) - - rs = np.concatenate([temp_array_1[0 + 2 * 2], temp_array_1[2 + 2 * 2][::-1]]) - zs = np.concatenate([temp_array_1[1 + 2 * 2], temp_array_1[3 + 2 * 2][::-1]]) - axis.fill(rs, zs, color=FILLCOLS[2 + 2]) - - # Bottom - rs = np.concatenate([temp_array_2[0], temp_array_2[2][::-1]]) - zs = np.concatenate([temp_array_2[1], temp_array_2[3][::-1]]) - axis.fill(rs, zs, color=FILLCOLS[2]) - - plotdhgap( - axis, - c_shldith, - c_blnkoth, - blnkith, - blnkoth, - a[11], - -blnktth, - triang, - FILLCOLS[3], - ) - plotdhgap( - axis, - c_blnkith, - c_fwoth, - fwith, - fwoth, - a[11] + blnktth, - -tfwvt, - triang, - FILLCOLS[4], - ) - - -def angle_check(angle1, angle2): - """Function to perform TF coil angle check""" - if angle1 > 1: - angle1 = 1 - if angle1 < -1: - angle1 = -1 - if angle2 > 1: - angle2 = 1 - if angle2 < -1: - angle2 = -1 - return angle1, angle2 - - -def plot_tf_coils(axis, mfile_data, scan): - """Function to plot TF coils - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE.DAT object - scan --> scan number to use - - """ - - vert_build = cumulative_vertical_build(mfile_data, scan) - - # Arc points - x_1 = mfile_data.data["xarc(1)"].get_scan(scan) - y_1 = mfile_data.data["yarc(1)"].get_scan(scan) - x_2 = mfile_data.data["xarc(2)"].get_scan(scan) - y_2 = mfile_data.data["yarc(2)"].get_scan(scan) - x_3 = mfile_data.data["xarc(3)"].get_scan(scan) - y_3 = mfile_data.data["yarc(3)"].get_scan(scan) - x_4 = mfile_data.data["xarc(4)"].get_scan(scan) - y_4 = mfile_data.data["yarc(4)"].get_scan(scan) - x_5 = mfile_data.data["xarc(5)"].get_scan(scan) - # y_5 = mfile_data.data["yarc(5)"].get_scan(scan) - - # Arc centres - x_c_1 = mfile_data.data["xctfc(1)"].get_scan(scan) - y_c_1 = mfile_data.data["yctfc(1)"].get_scan(scan) - x_c_2 = 
mfile_data.data["xctfc(2)"].get_scan(scan) - y_c_2 = mfile_data.data["yctfc(2)"].get_scan(scan) - x_c_3 = mfile_data.data["xctfc(3)"].get_scan(scan) - y_c_3 = mfile_data.data["yctfc(3)"].get_scan(scan) - x_c_4 = mfile_data.data["xctfc(4)"].get_scan(scan) - y_c_4 = mfile_data.data["yctfc(4)"].get_scan(scan) - - # Arc radii - rad_1 = np.sqrt((x_1 - x_c_1) ** 2 + (y_1 - y_c_1) ** 2) - rad_2 = np.sqrt((x_2 - x_c_2) ** 2 + (y_2 - y_c_2) ** 2) - rad_3 = np.sqrt((x_3 - x_c_3) ** 2 + (y_3 - y_c_3) ** 2) - rad_4 = np.sqrt((x_4 - x_c_4) ** 2 + (y_4 - y_c_4) ** 2) - - # Arc angles - a_1_1 = (x_1 - x_c_1) / rad_1 - a_1_2 = (x_2 - x_c_1) / rad_1 - a_1_1, a_1_2 = angle_check(a_1_1, a_1_2) - angle_1_1 = sp.arcsin(a_1_1) - angle_1_2 = sp.arcsin(a_1_2) - - a_2_1 = (x_2 - x_c_2) / rad_2 - a_2_2 = (x_3 - x_c_2) / rad_2 - a_2_1, a_2_2 = angle_check(a_2_1, a_2_2) - angle_2_1 = sp.arcsin(a_2_1) - angle_2_2 = sp.arcsin(a_2_2) - - a_3_1 = (x_3 - x_c_3) / rad_3 - a_3_2 = (x_4 - x_c_3) / rad_3 - a_3_1, a_3_2 = angle_check(a_3_1, a_3_2) - angle_3_1 = sp.arcsin(a_3_1) - angle_3_2 = sp.arcsin(a_3_2) - - a_4_1 = (x_4 - x_c_4) / rad_4 - a_4_2 = (x_5 - x_c_4) / rad_4 - a_4_1, a_4_2 = angle_check(a_4_1, a_4_2) - angle_4_1 = sp.arcsin(a_4_1) - angle_4_2 = sp.arcsin(a_4_2) - - in_x_1 = ( - rad_1 * np.sin(np.linspace(angle_1_1, angle_1_2, 30, endpoint=True)) + x_c_1 - ) - in_x_2 = ( - rad_2 * np.sin(np.linspace(angle_2_1, angle_2_2, 30, endpoint=True)) + x_c_2 - ) - in_x_3 = ( - rad_3 * np.sin(np.linspace(angle_3_1, angle_3_2, 30, endpoint=True)) + x_c_3 - ) - in_x_4 = ( - rad_4 * np.sin(np.linspace(angle_4_1, angle_4_2, 30, endpoint=True)) + x_c_4 - ) - in_x = np.concatenate((in_x_1, in_x_2, in_x_3, in_x_4)) - - in_y_1 = ( - rad_1 * np.cos(np.linspace(angle_1_1, angle_1_2, 30, endpoint=True)) + y_c_1 - ) - in_y_2 = ( - rad_2 * np.cos(np.linspace(angle_2_1, angle_2_2, 30, endpoint=True)) + y_c_2 - ) - in_y_3 = ( - rad_3 * np.cos(np.linspace(angle_3_1, angle_3_2, 30, endpoint=True)) + y_c_3 - ) - 
in_y_4 = ( - rad_4 * np.cos(np.linspace(angle_4_1, angle_4_2, 30, endpoint=True)) + y_c_4 - ) - in_y = np.concatenate((in_y_1, in_y_2, in_y_3, in_y_4)) - - in_x = np.concatenate([in_x, in_x[::-1]]) - in_y = np.concatenate([in_y, -in_y[::-1]]) - - # in_width = cumulative_radial_build("gapsto", mfile_data, scan) - \ - # cumulative_radial_build("tfcth", mfile_data, scan) - # out_width = in_width + mfile_data.data["tfcth"].get_scan(scan) + \ - # mfile_data.data["tfthko"].get_scan(scan) - # out_x = ((in_x - cumulative_radial_build("tfcth", mfile_data, scan)) * - # (out_width/in_width)) - - centre_in_x = ( - cumulative_radial_build("tfcth", mfile_data, scan) - + cumulative_radial_build("gapsto", mfile_data, scan) - ) / 2.0 - centre_out_x = ( - cumulative_radial_build("gapoh", mfile_data, scan) - + cumulative_radial_build("tfthko", mfile_data, scan) - ) / 2.0 - in_width = cumulative_radial_build( - "gapsto", mfile_data, scan - ) - cumulative_radial_build("tfcth", mfile_data, scan) - out_width = cumulative_radial_build( - "tfthko", mfile_data, scan - ) - cumulative_radial_build("gapoh", mfile_data, scan) - - out_x = ((in_x - centre_in_x) * (out_width / in_width)) + centre_out_x - extern = vert_build[7] / vert_build[6] - if vert_build[-1]: - extern = (vert_build[0] - vert_build[15]) / (vert_build[1] - vert_build[14]) - out_y = in_y * extern - - shift = (vert_build[0] + vert_build[15]) / 2.0 - out_y += shift - in_y += shift - - ins_x = np.concatenate((in_x, out_x[::-1])) - ins_y = np.concatenate((in_y, out_y[::-1])) - - axis.fill(ins_x, ins_y, color=FILLCOLS[0]) - - -def plot_pf_coils(axis, mfile_data, scan): - """Function to plot PF coils - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE.DAT object - scan --> scan number to use - - """ - - coils_r = [] - coils_z = [] - coils_dr = [] - coils_dz = [] - coil_text = [] - - # Number of coils (1 is OH coil) - number_of_coils = 0 - for item in mfile_data.data.keys(): - if "rpf(" in item: - number_of_coils += 
1 - - # Get OH coil - coils_r.append(mfile_data.data["rpf(nohc)"].get_scan(scan)) - coils_z.append(mfile_data.data["zpf(nohc)"].get_scan(scan)) - coils_dr.append(mfile_data.data["ohdr"].get_scan(scan)) - coils_dz.append(mfile_data.data["ohdz"].get_scan(scan)) - coil_text.append("OH") - - # Rest of the coils - for coil in range(1, number_of_coils): - coils_r.append(mfile_data.data["rpf({:02})".format(coil)].get_scan(scan)) - coils_z.append(mfile_data.data["zpf({:02})".format(coil)].get_scan(scan)) - coils_dr.append(mfile_data.data["pfdr{:02}".format(coil)].get_scan(scan)) - coils_dz.append(mfile_data.data["pfdz{:02}".format(coil)].get_scan(scan)) - coil_text.append(str(coil)) - - for i in range(len(coils_r)): - r_1 = coils_r[i] - 0.5 * coils_dr[i] - z_1 = coils_z[i] - 0.5 * coils_dz[i] - r_2 = coils_r[i] - 0.5 * coils_dr[i] - z_2 = coils_z[i] + 0.5 * coils_dz[i] - r_3 = coils_r[i] + 0.5 * coils_dr[i] - z_3 = coils_z[i] + 0.5 * coils_dz[i] - r_4 = coils_r[i] + 0.5 * coils_dr[i] - z_4 = coils_z[i] - 0.5 * coils_dz[i] - r_5 = coils_r[i] - 0.5 * coils_dr[i] - z_5 = coils_z[i] - 0.5 * coils_dz[i] - - r_points = [r_1, r_2, r_3, r_4, r_5] - z_points = [z_1, z_2, z_3, z_4, z_5] - axis.plot(r_points, z_points, color="black") - axis.text( - coils_r[i], - coils_z[i], - coil_text[i], - ha="center", - va="center", - fontsize="smaller", - ) - - -def plot_info(axis, data, mfile_data, scan): - """Function to plot data in written form on a matplotlib plot. 
- - Arguments: - axis --> axis object to plot to - data --> plot information - mfile_data --> MFILE.DAT object - scan --> scan number to use - - """ - eqpos = 0.7 - for i in range(len(data)): - axis.text(0, -i, data[i][1], ha="left", va="center") - if isinstance(data[i][0], str): - if data[i][0] == "": - axis.text(eqpos, -i, "\n", ha="left", va="center") - elif data[i][0][0] == "#": - axis.text( - -0.05, -i, "{}\n".format(data[i][0][1:]), ha="left", va="center" - ) - elif data[i][0][0] == "!": - value = data[i][0][1:] - axis.text( - 0.4, - -i, - "--> " + str(value.replace('"', "")) + " " + data[i][2], - ha="left", - va="center", - ) - else: - if mfile_data.data[data[i][0]].exists: - dat = mfile_data.data[data[i][0]].get_scan(scan) - if isinstance(dat, str): - value = dat - else: - value = "{:.4g}".format( - mfile_data.data[data[i][0]].get_scan(scan) - ) - if "alpha" in data[i][0]: - value = str(float(value) + 1.0) - axis.text( - eqpos, - -i, - "= " + value + " " + data[i][2], - ha="left", - va="center", - ) - else: - mfile_data.data[data[i][0]].get_scan(-1) - axis.text( - eqpos, -i, "=" + "ERROR! 
Var missing", ha="left", va="center" - ) - else: - dat = data[i][0] - if isinstance(dat, str): - value = dat - else: - value = "{:.4g}".format(data[i][0]) - axis.text( - eqpos, -i, "= " + value + " " + data[i][2], ha="left", va="center" - ) - - -def plot_geometry_info(axis, mfile_data, scan): - """Function to plot geometry info - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE.DAT object - scan --> scan number to use - - """ - xmin = 0 - xmax = 1 - ymin = -16 - ymax = 1 - - axis.text(-0.05, 1, "Geometry:", ha="left", va="center") - axis.set_ylim([ymin, ymax]) - axis.set_xlim([xmin, xmax]) - axis.set_axis_off() - axis.set_autoscaley_on(False) - axis.set_autoscalex_on(False) - - in_blanket_thk = mfile_data.data["shldith"].get_scan(scan) + mfile_data.data[ - "blnkith" - ].get_scan(scan) - out_blanket_thk = mfile_data.data["shldoth"].get_scan(scan) + mfile_data.data[ - "blnkoth" - ].get_scan(scan) - - data = [ - ("rmajor", "$R_0$", "m"), - ("rminor", "a", "m"), - ("aspect", "A", ""), - ("kappa95", r"$\kappa_{95}$", ""), - ("triang95", r"$\delta_{95}$", ""), - ("sarea", "Surface area", "m$^2$"), - ("vol", "Plasma volume", "m$^3$"), - ("n_tf", "No. of TF coils", ""), - (in_blanket_thk, "i/b blkt/shld", "m"), - (out_blanket_thk, "o/b blkt/shld", "m"), - ("powfmw", "Fusion power", "MW"), - ("", "", ""), - ("#User Info", "", ""), - ("!" + str(mfile_data.data["procver"].get_scan(scan)), "PROCESS Version", ""), - ("!" + mfile_data.data["date"].get_scan(scan), "Date:", ""), - ("!" + mfile_data.data["time"].get_scan(scan), "Time:", ""), - ("!" + mfile_data.data["username"].get_scan(scan), "User:", ""), - ( - "!" 
- + proc_dict.DICT_OPTIMISATION_VARS[ - abs(int(mfile_data.data["minmax"].get_scan(scan))) - ], - "Optimising:", - "", - ), - ] - - plot_info(axis, data, mfile_data, scan) - - -def plot_physics_info(axis, mfile_data, scan): - """Function to plot geometry info - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE.DAT object - scan --> scan number to use - - """ - xmin = 0 - xmax = 1 - ymin = -16 - ymax = 1 - - axis.text(-0.05, 1, "Physics:", ha="left", va="center") - axis.set_ylim([ymin, ymax]) - axis.set_xlim([xmin, xmax]) - axis.set_axis_off() - axis.set_autoscaley_on(False) - axis.set_autoscalex_on(False) - - nong = mfile_data.data["dnla"].get_scan(scan) / mfile_data.data[ - "dlimit(7)" - ].get_scan(scan) - - dnz = mfile_data.data["dnz"].get_scan(scan) / mfile_data.data["dene"].get_scan(scan) - - data = [ - ("plascur/1d6", "$I_p$", "MA"), - ("bt", "Vacuum $B_T$ as $R_0$", "T"), - ("q", "$q_{edge}$", ""), - ("normalised thermal beta", r"$\beta_N$, thermal", "% m T MA$^{-1}$"), - ("normalised total beta", r"$\beta_N$, total", "% m T MA$^{-1}$"), - ("thermal poloidal beta", r"$\beta_P$, thermal", ""), - ("betap", r"$\beta_P$, total", ""), - ("te", r"$< t_e >$", "keV"), - ("dene", r"$< n_e >$", "m$^{-3}$"), - (nong, r"$< n_{\mathrm{e,line}} >/n_G$", ""), - ("alphat", r"$T_{e0}/ < T_e >$", ""), - ("alphan", r"$n_{e0}/ < n_{\mathrm{e, vol}} >$", ""), - ("zeff", r"$Z_{\mathrm{eff}}$", ""), - ("zeffso", r"$Z_{\mathrm{eff, SoL}}$", ""), - (dnz, r"$n_Z/ < n_{\mathrm{e, vol}} >$", ""), - ("taueff", r"$\tau_e$", "s"), - ("hfact", "H-factor", ""), - ("tauelaw", "Scaling law", ""), - ] - - plot_info(axis, data, mfile_data, scan) - - -def plot_magnetics_info(axis, mfile_data, scan): - """Function to plot magnet info - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE.DAT object - scan --> scan number to use - - """ - - xmin = 0 - xmax = 1 - ymin = -16 - ymax = 1 - - axis.text(-0.05, 1, "Coil currents etc:", ha="left", va="center") - 
axis.set_ylim([ymin, ymax]) - axis.set_xlim([xmin, xmax]) - axis.set_axis_off() - axis.set_autoscaley_on(False) - axis.set_autoscalex_on(False) - - # Number of coils (1 is OH coil) - number_of_coils = 0 - for item in mfile_data.data.keys(): - if "rpf(" in item: - number_of_coils += 1 - - pf_info = [] - for i in range(1, number_of_coils): - if i % 2 != 0: - pf_info.append( - ( - mfile_data.data["ric({:02})".format(i)].get_scan(scan), - "PF {}".format(i), - ) - ) - - tburn = mfile_data.data["tburn"].get_scan(scan) / 3600.0 - tftype = proc_dict.DICT_TF_TYPE[mfile_data.data["i_tf_sc_mat"].get_scan(scan)] - vssoft = mfile_data.data["vsres"].get_scan(scan) + mfile_data.data[ - "vsind" - ].get_scan(scan) - - data = [ - (pf_info[0][0], pf_info[0][1], "MA"), - (pf_info[1][0], pf_info[1][1], "MA"), - (pf_info[2][0], pf_info[2][1], "MA"), - (vssoft, "Startup flux swing", "Wb"), - ("vstot", "Available flux swing", "Wb"), - (tburn, "Burn time", "hrs"), - ("", "", ""), - ("#TF coil type is {}".format(tftype), "", ""), - ("bmaxtf", "Peak field at conductor", "T"), - ("iooic", r"I/I$_{\mathrm{crit}}$", ""), - ("tmarg", "Temperature margin", "K"), - ("sig_tf_case", "TF case maximum shear stress (Tresca criterion)", "Pa"), - ("sig_tf_wp", "TF conduit maximum shear stress (Tresca criterion)", "Pa"), - ( - "sig_tf_case_max", - "Allowable maximum shear stress in the TF case (Tresca criterion)", - "Pa", - ), - ( - "sig_tf_wp_max", - "Allowable maximum shear stress in the TF conduit (Tresca criterion)", - "Pa", - ), - ("", "", ""), - ("#Costs", "", ""), - ("coe", "Cost of electricity", r"\$MWh"), - ("concost", "Constructed cost", r"M\$"), - ("capcost", "Total capex", r"M\$"), - ] - - plot_info(axis, data, mfile_data, scan) - - -def plot_power_info(axis, mfile_data, scan): - """Function to plot power info - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE.DAT object - scan --> scan number to use - - """ - - xmin = 0 - xmax = 1 - ymin = -16 - ymax = 1 - - 
axis.text(-0.05, 1, "Power flows/economics:", ha="left", va="center") - axis.set_ylim([ymin, ymax]) - axis.set_xlim([xmin, xmax]) - axis.set_axis_off() - axis.set_autoscaley_on(False) - axis.set_autoscalex_on(False) - - dnla = mfile_data.data["dnla"].get_scan(scan) / 1.0e20 - bt = mfile_data.data["bt"].get_scan(scan) - surf = mfile_data.data["sarea"].get_scan(scan) - pthresh = 0.0488 * dnla**0.717 * bt**0.803 * surf**0.941 * 0.8 - err = ( - 0.057**2 - + (0.035 * sp.log(dnla)) ** 2 - + (0.032 * sp.log(bt)) ** 2 - + (0.019 * sp.log(surf)) ** 2 - ) - err = np.sqrt(err) * pthresh - - gross_eff = 100.0 * ( - mfile_data.data["pgrossmw"].get_scan(scan) - / mfile_data.data["pthermmw"].get_scan(scan) - ) - - net_eff = 100.0 * ( - ( - mfile_data.data["pgrossmw"].get_scan(scan) - - mfile_data.data["htpmw"].get_scan(scan) - ) - / ( - mfile_data.data["pthermmw"].get_scan(scan) - - mfile_data.data["htpmw"].get_scan(scan) - ) - ) - - plant_eff = 100.0 * ( - mfile_data.data["pnetelmw"].get_scan(scan) - / mfile_data.data["powfmw"].get_scan(scan) - ) - - data = [ - ("wallmw", "Av. 
neutron wall load", "MW m$^{-2}$"), - ("pinnerzoneradmw", "inner zone radiation", "MW"), - ("psyncpv*vol", "Synchrotron radiation", "MW"), - ("pouterzoneradmw", "outer zone radiation", "MW"), - ("pnucblkt", "Nuclear heating in blanket", "MW"), - ("pnucshld", "Nuclear heating in shield", "MW"), - ("pdivt", "Psep / Pdiv", "MW"), - (pthresh, "H-mode threshold (M=2.5)", r"$\pm${:.3f} MW".format(err)), - ("fwbllife", "FW/Blanket life", "years"), - ("divlife", "Divertor life", "years"), - ("pthermmw", "Thermal Power", "MW"), - (gross_eff, "Gross cycle efficiency", "%"), - (net_eff, "Net cycle efficiency", "%"), - ("pgrossmw", "Gross electric power", "MW"), - ("pnetelmw", "Net electric power", "MW"), - ( - plant_eff, - "Plant efficiency " + r"$\frac{P_{\mathrm{e,net}}}{P_{\mathrm{fus}}}$", - "%", - ), - ] - - plot_info(axis, data, mfile_data, scan) - - -def plot_current_drive_info(axis, mfile_data, scan): - """Function to plot current drive info - - Arguments: - axis --> axis object to plot to - mfile_data --> MFILE.DAT object - scan --> scan number to use - - """ - - xmin = 0 - xmax = 1 - ymin = -16 - ymax = 1 - - axis.text(-0.05, 1, "Neutral Beam Current Drive:", ha="left", va="center") - axis.set_ylim([ymin, ymax]) - axis.set_xlim([xmin, xmax]) - axis.set_axis_off() - axis.set_autoscaley_on(False) - axis.set_autoscalex_on(False) - - pinjie = mfile_data.data["pinjmw"].get_scan(scan) - - pdivt = mfile_data.data["pdivt"].get_scan(scan) - pdivr = pdivt / mfile_data.data["rmajor"].get_scan(scan) - - pdivnr = ( - 10.0e20 - * mfile_data.data["pdivt"].get_scan(scan) - / ( - mfile_data.data["rmajor"].get_scan(scan) - * mfile_data.data["dene"].get_scan(scan) - ) - ) - - dnla = mfile_data.data["dnla"].get_scan(scan) / 1.0e20 - bt = mfile_data.data["bt"].get_scan(scan) - surf = mfile_data.data["sarea"].get_scan(scan) - pthresh = 0.0488 * dnla**0.717 * bt**0.803 * surf**0.941 * 0.8 - flh = pdivt / pthresh - - powerht = mfile_data.data["powerht"].get_scan(scan) - psync = 
mfile_data.data["psyncpv*vol"].get_scan(scan) - pbrem = mfile_data.data["pinnerzoneradmw"].get_scan(scan) - hfact = mfile_data.data["hfact"].get_scan(scan) - hstar = hfact * (powerht / (powerht + psync + pbrem)) ** 0.31 - - data = [ - (pinjie, "SS auxiliary power", "MW"), - ("pheat", "Power for heating only", "MW"), - ("bootipf", "Bootstrap fraction", ""), - ("faccd", "Auxiliary fraction", ""), - ("facoh", "Ohmic fraction", ""), - ("gamnb", "NB gamma", "$10^{20}$ A W$^{-1}$ m$^{-2}$"), - ("enbeam", "NB energy", "keV"), - ("powerht", "Assumed heating power", "MW"), - (pdivr, r"$\frac{P_{\mathrm{div}}}{R_{0}}$", "MW m$^{-1}$"), - ( - pdivnr, - r"$\frac{P_{\mathrm{div}}}{ R_{0}}$", - r"$\times 10^{-20}$ MW m$^{2}$", - ), - (flh, r"$\frac{P_{\mathrm{div}}}{P_{\mathrm{LH}}}$", ""), - (hstar, "H* (non-rad. corr.)", ""), - ] - - plot_info(axis, data, mfile_data, scan) - - -# def plot_video_info(axis, data, mfile_data, scan): -# """Function to plot data to accompany video in written form on a -# matplotlib plot. 
-# """ -# eqpos = 0.7 -# for i in range(len(data)): -# axis.text(0, -i, data[i][1], ha='left', va='center') -# if isinstance(data[i][0], str): -# if data[i][0] == "": -# axis.text(eqpos, -i, "\n", -# ha='left', va='center') -# elif data[i][0][0] == "#": -# axis.text(-0.05, -i, "%s\n" % data[i][0][1:], -# ha='left', va='center') -# elif data[i][0][0] == "!": -# value = data[i][0][1:] -# axis.text(0.4, -i, "--> " + str(value.replace('"', '')) + -# " " + data[i][2], ha='left', va='center') -# else: -# dat = mfile_data.data[data[i][0]].get_scan(scan) -# if isinstance(dat, str): -# value = dat -# else: -# value = "%.4g" % mfile_data.data[data[i][0]].get_scan(scan) -# if "alpha" in data[i][0]: -# value = str(float(value) + 1.0) -# axis.text(eqpos, -i, '= ' + value + ' ' + data[i][2], -# ha='left', va='center') -# else: -# dat = data[i][0] -# if isinstance(dat, str): -# value = dat -# else: -# value = "%.4g" % data[i][0] -# axis.text(eqpos, -i, '= ' + value + ' ' + data[i][2], -# ha='left', va='center') -# -# -# def plot_animation_info(axis, mfile_data, scan, animation_info=ANIMATION_INFO): -# """Function to plot animation information. Where animation information -# is defined by the user. There is a default animation_info that can be -# passed as an argument. -# """ -# -# xmin = 0 -# xmax = 1 -# ymin = -16 -# ymax = 1 -# -# axis.text(-0.05, 1, 'Plant Information:', ha='left', va='center') -# axis.set_ylim([ymin, ymax]) -# axis.set_xlim([xmin, xmax]) -# axis.set_axis_off() -# axis.set_autoscaley_on(False) -# axis.set_autoscalex_on(False) -# -# plot_video_info(axis, animation_info, mfile_data, scan) -# -# -# def plot_animation_graph(axis, mfile_data, scan, x, y, xlabel, ylabel): -# """Function to plot animation information. Where animation information -# is defined by the user. There is a default animation_info that can be -# passed as an argument. 
-# """ -# -# #axis.set_ylim([ymin, ymax]) -# #axis.set_xlim([xmin, xmax]) -# #axis.set_axis_off() -# axis.set_autoscaley_on(False) -# axis.set_autoscalex_on(False) -# axis.set_xlabel(xlabel) -# axis.set_ylabel(ylabel) -# axis.plot(x, y) -# a = random.uniform(0, 1) -# b = random.uniform(0, 1) -# axis.plot([a, a], -# [b, b], -# color="red", marker="o", alpha=0.5, markersize=20) diff --git a/utilities/process_io_lib/process_plots.py b/utilities/process_io_lib/process_plots.py deleted file mode 100644 index 6a559902..00000000 --- a/utilities/process_io_lib/process_plots.py +++ /dev/null @@ -1,474 +0,0 @@ -""" - - PROCESS plotting library - - James Morris - 27/07/17 - CCFE - -""" -# flake8: noqa - -import numpy as np -import matplotlib.pyplot as plt -from matplotlib import gridspec - -color_sequence = [ - "#1f77b4", - "#aec7e8", - "#ff7f0e", - "#ffbb78", - "#2ca02c", - "#98df8a", - "#d62728", - "#ff9896", - "#9467bd", - "#c5b0d5", - "#8c564b", - "#c49c94", - "#e377c2", - "#f7b6d2", - "#7f7f7f", - "#c7c7c7", - "#bcbd22", - "#dbdb8d", - "#17becf", - "#9edae5", -] - -plt.rcParams["font.family"] = "serif" -plt.rcParams["font.serif"] = "Ubuntu" -plt.rcParams["font.monospace"] = "Ubuntu Mono" -plt.rcParams["font.size"] = 10 -plt.rcParams["axes.labelsize"] = 10 -plt.rcParams["xtick.labelsize"] = 9 -plt.rcParams["ytick.labelsize"] = 9 -plt.rcParams["legend.fontsize"] = 10 - -# PROCESS libraries import -try: - import process.io.mfile as mfile -except ImportError: - print("The PROCESS python libraries are not in your path. 
Please check PYTHONPATH") - exit() - - -def align_yaxis(ax1, v1, ax2, v2): - """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1""" - _, y1 = ax1.transData.transform((0, v1)) - _, y2 = ax2.transData.transform((0, v2)) - adjust_yaxis(ax2, (y1 - y2) / 2, v2) - adjust_yaxis(ax1, (y2 - y1) / 2, v1) - - -def adjust_yaxis(ax, ydif, v): - """shift axis ax by ydiff, maintaining point v at the same location""" - inv = ax.transData.inverted() - _, dy = inv.transform((0, 0)) - inv.transform((0, ydif)) - miny, maxy = ax.get_ylim() - miny, maxy = miny - v, maxy - v - if -miny > maxy or (-miny == maxy and dy > 0): - nminy = miny - nmaxy = miny * (maxy + dy) / (miny + dy) - else: - nmaxy = maxy - nminy = maxy * (miny + dy) / (maxy + dy) - ax.set_ylim(nminy + v, nmaxy + v) - - -def plot_pulse_timings( - mfile_obj, save=False, show=False, return_fig=False, save_path="" -): - """ - Plot the PROCESS pulse timings plot - - Will plot Ip, CS and PF currents over time - - mfile_obj : MFILE.DAT object - save : save to "process_times.png" - show : show to screen on running - return_fig : return fig object - - """ - - # setup figure - fig = plt.figure(dpi=100) - fig.subplots_adjust(hspace=0.1) - - # # Setup plot axes - gs = gridspec.GridSpec(4, 1, height_ratios=[1, 4, 4, 4]) - axis_0 = fig.add_subplot(gs[0]) - axis_1 = fig.add_subplot(gs[1]) - axis_2 = fig.add_subplot(gs[2]) - axis_3 = fig.add_subplot(gs[3]) - - # Number, n, of PF circuits including CS coil (n-1) and plasma n - number_of_pf_circuits = int(mfile_obj.data["ncirt"].get_scan(-1)) - - # Number of time points - n_time = 6 - - # Time array - time_list = ["tramp", "tohs", "theat", "tburn", "tqnch", "tdwell"] - times_array = np.array([mfile_obj.data[val].get_scan(-1) for val in time_list]) - time = np.array([sum(times_array[:i]) for i in range(len(times_array))]) - - # Current arrays - current_arrays = np.zeros(shape=(number_of_pf_circuits, n_time)) - - # PF coils - for i in range(number_of_pf_circuits - 2): - for j in 
range(n_time): - variable_name = "pfc{0:02}t{1:02}".format(i + 1, j + 1) - current_arrays[i][j] = mfile_obj.data[variable_name].get_scan(-1) / 1e6 - - # Central solenoid - for m in range(n_time): - variable_name = "cst{0:02}".format(m + 1) - current_arrays[number_of_pf_circuits - 2][m] = ( - mfile_obj.data[variable_name].get_scan(-1) / 1e6 - ) - - # Plasma - for p in range(n_time): - variable_name = "plasmat{0:02}".format(p + 1) - current_arrays[number_of_pf_circuits - 1][p] = ( - mfile_obj.data[variable_name].get_scan(-1) / 1e6 - ) - - # Plotting - - # Axis 1 - axis_1.axhline(y=0, color="k") - axis_1.set_ylabel("Current [MA]") - axis_1.legend(loc="upper left", bbox_to_anchor=(1, 1.05)) - - # limits - ymin_1 = 1.2 * min(current_arrays[-2]) - ymax_1 = 1.2 * max(current_arrays[-2]) - axis_1.set_ylim([ymin_1, ymax_1]) - - for ti in range(len(time)): - axis_1.axvline(x=time[ti], color="k", linestyle="--", linewidth=0.5, alpha=0.5) - - # Axis 2 - axis_2.axhline(y=0, color="k") - axis_2.set_ylabel("Current [MA]") - axis_2.set_ylim([0, 25]) - axis_2.legend(loc="upper left", bbox_to_anchor=(1, 1.05)) - - # limits - ymin_2 = 1.2 * min(current_arrays[-1]) - ymax_2 = 1.2 * max(current_arrays[-1]) - axis_2.set_ylim([ymin_2, ymax_2]) - - for ti in range(len(time)): - axis_2.axvline(x=time[ti], color="k", linestyle="--", linewidth=0.5, alpha=0.5) - - # Axis 3 - ymin_3 = 0 - ymax_3 = 0 - for s in range(number_of_pf_circuits - 2): - axis_3.plot( - time, - current_arrays[s], - label="PF circuit {0}".format(s + 1), - color=color_sequence[s], - ) - - # limits - if 1.2 * min(current_arrays[s]) < ymin_3: - ymin_3 = 1.2 * min(current_arrays[s]) - if 1.2 * max(current_arrays[s]) > ymax_3: - ymax_3 = 1.2 * max(current_arrays[s]) - - axis_3.axhline(y=0, color="k") - axis_3.legend(loc="upper left", bbox_to_anchor=(1, 1.05)) - axis_3.set_xlabel("Time [s]") - axis_3.set_ylabel("Current [MA]") - - # limits - axis_3.set_ylim([ymin_3, ymax_3]) - - for ti in range(len(time)): - 
axis_3.axvline(x=time[ti], color="k", linestyle="--", linewidth=0.5, alpha=0.5) - - # Time bar plot - y_pos = 0 - left = 0 - patch_handles = list() - - for i in range(len(time_list)): - time_length = mfile_obj.data[time_list[i]].get_scan(-1) - if time_length != 0: - patch_handles.append( - axis_0.barh( - y_pos, - time_length, - color=color_sequence[i], - align="center", - left=left, - height=5, - label=time_list[i], - ) - ) - - # accumulate the left-hand offsets - left += time_length - - axis_0.legend( - bbox_to_anchor=(0, 1.1, 1, 0.2), - loc="lower left", - mode="expand", - borderaxespad=0, - ncol=15, - labelspacing=2, - ) - axis_0.axes.get_yaxis().set_visible(False) - axis_0.tick_params(axis="both", which="both", length=0) - - xticklabels = ( - axis_0.get_xticklabels() + axis_1.get_xticklabels() + axis_2.get_xticklabels() - ) - plt.setp(xticklabels, visible=False) - - # Options - if save: - plt.savefig( - "{0}process_times.png".format(save_path), dpi=300, bbox_inches="tight" - ) - - if show: - - plt.show() - - if return_fig: - return fig - - return - - -def radial_bar_plot( - build_params, mfile_obj, save=False, show=False, return_fig=False, save_path="" -): - """ - Plot the PROCESS radial build bar plot - - Will plot radial build thicknesses - - build_params : list of radial build entry names - mfile_obj : MFILE.DAT object - save : save to "process_times.png" - show : show to screen on running - return_fig : return fig object - """ - - fig = plt.figure(figsize=(18, 2)) - ax = fig.add_subplot(111) - - y_pos = 0 - left = 0 - patch_handles = list() - - for i in range(len(build_params)): - item_thickness = mfile_obj.data[build_params[i]].get_scan(-1) - if item_thickness != 0: - patch_handles.append( - ax.barh( - y_pos, - item_thickness, - color=color_sequence[i], - align="center", - left=left, - height=5, - label=build_params[i], - ) - ) - - # accumulate the left-hand offsets - left += item_thickness - - ax.legend( - bbox_to_anchor=(0, 1.02, 1, 0.2), - loc="lower 
left", - mode="expand", - borderaxespad=0, - ncol=15, - labelspacing=2, - ) - ax.axes.get_yaxis().set_visible(False) - ax.set_xlabel("Radial Build [m]") - last_tick = left - minor_ticks = np.arange(0, last_tick, 0.1) - ax.set_xticks(minor_ticks, minor=True) - - # Options - if save: - plt.savefig( - "{0}process_radial_plot.png".format(save_path), dpi=300, bbox_inches="tight" - ) - - if show: - plt.show() - - if return_fig: - return fig - - return - - -def plasma_profiles_plot( - mfile_obj, save=False, show=False, return_fig=False, save_path="" -): - """ - Plot the PROCESS plasma profiles - - mfile_obj : MFILE.DAT object - save : save to "process_times.png" - show : show to screen on running - return_fig : return fig object - """ - - fig = plt.figure(dpi=100) - fig.subplots_adjust(wspace=0.5) - axis_1 = fig.add_subplot(131, aspect=0.05) - axis_2 = fig.add_subplot(132, aspect=1 / 35) - axis_3 = fig.add_subplot(133, aspect=1 / 10) - - # Parameters - ipedestal = mfile_obj.data["ipedestal"].get_scan(-1) - rhopedn = mfile_obj.data["rhopedn"].get_scan(-1) - rhopedt = mfile_obj.data["rhopedt"].get_scan(-1) - ne0 = mfile_obj.data["ne0"].get_scan(-1) - neped = mfile_obj.data["neped"].get_scan(-1) - teped = mfile_obj.data["teped"].get_scan(-1) - alphan = mfile_obj.data["alphan"].get_scan(-1) - alphat = mfile_obj.data["alphat"].get_scan(-1) - tesep = mfile_obj.data["tesep"].get_scan(-1) - te0 = mfile_obj.data["te0"].get_scan(-1) - tbeta = mfile_obj.data["tbeta"].get_scan(-1) - q0 = mfile_obj.data["q0"].get_scan(-1) - q95 = mfile_obj.data["q95"].get_scan(-1) - - # density profile plot - - xmin = 0 - xmax = 1 - ymin = 0 - ymax = 20 - axis_1.set_ylim([ymin, ymax]) - axis_1.set_xlim([xmin, xmax]) - axis_1.set_autoscaley_on(False) - axis_1.set_xlabel("r/a") - axis_1.set_ylabel("ne / 1e19 m-3") - axis_1.set_title("Density profile") - - if mfile_obj.data["ipedestal"] == 1: - rhocore1 = np.linspace(0, 0.95 * rhopedn) - rhocore2 = np.linspace(0.95 * rhopedn, rhopedn) - rhocore = 
np.append(rhocore1, rhocore2) - ncore = neped + (ne0 - neped) * (1 - rhocore**2 / rhopedn**2) ** alphan - - rhosep = np.linspace(rhopedn, 1) - nsep = nesep + (neped - nesep) * (1 - rhosep) / (1 - min(0.9999, rhopedn)) - - rho = np.append(rhocore, rhosep) - ne = np.append(ncore, nsep) - else: - rho1 = np.linspace(0, 0.95) - rho2 = np.linspace(0.95, 1) - rho = np.append(rho1, rho2) - ne = ne0 * (1 - rho**2) ** alphan - ne = ne / 1e19 - axis_1.plot(rho, ne) - - # temperature profile plot - - xmin = 0 - xmax = 1 - ymin = 0 - ymax = 35 - axis_2.set_ylim([ymin, ymax]) - axis_2.set_xlim([xmin, xmax]) - axis_2.set_autoscaley_on(False) - axis_2.set_xlabel("r/a") - axis_2.set_ylabel("Te / keV") - axis_2.set_title("Temperature profile") - - if ipedestal == 1: - rhocore1 = np.linspace(0, 0.9 * rhopedn) - rhocore2 = np.linspace(0.9 * rhopedn, rhopedn) - rhocore = np.append(rhocore1, rhocore2) - tcore = teped + (te0 - teped) * (1 - (rhocore / rhopedn) ** tbeta) ** alphat - - rhosep = np.linspace(rhopedn, 1) - tsep = tesep + (teped - tesep) * (1 - rhosep) / (1 - min(0.9999, rhopedt)) - - rho = np.append(rhocore, rhosep) - te = np.append(tcore, tsep) - else: - rho = np.linspace(0, 1) - te = te0 * (1 - rho**2) ** alphat - axis_2.plot(rho, te) - - # q profile plot - - XMIN = 0 - XMAX = 1 - YMIN = 0 - YMAX = 10 - axis_3.set_ylim([YMIN, YMAX]) - axis_3.set_xlim([XMIN, XMAX]) - axis_3.set_autoscaley_on(False) - axis_3.set_xlabel("r/a") - axis_3.set_ylabel("q(r)") - axis_3.set_title("q profile") - - rho = np.linspace(0, 1) - q_r_nevin = q0 + (q95 - q0) * (rho + rho * rho + rho**3) / (3.0) - q_r_sauter = q0 + (q95 - q0) * (rho * rho) - - axis_3.plot(rho, q_r_nevin, label="Nevin") - axis_3.plot(rho, q_r_sauter, label="Sauter") - axis_3.legend() - - # Options - if save: - plt.savefig( - "{0}process_plasma_profiles.png".format(save_path), - dpi=300, - bbox_inches="tight", - ) - - if show: - plt.show() - - if return_fig: - return fig - - return - - -if __name__ == "__main__": - 
print("Running in test mode") - mf = mfile.MFile(filename="MFILE.DAT") - plot_pulse_timings(mf, save=True, show=False, save_path="figures/") - radial_build = [ - "bore", - "ohcth", - "precomp", - "gapoh", - "tfcth", - "deltf", - "thshield_ib", - "gapds", - "d_vv_in", - "shldith", - "vvblgap", - "blnkith", - "fwith", - "scrapli", - "rminor", - ] - radial_bar_plot(radial_build, mf, show=False, save=True) - plasma_profiles_plot(mf, show=False, save=True) diff --git a/utilities/process_io_lib/profile_funcs.py b/utilities/process_io_lib/profile_funcs.py deleted file mode 100644 index bd489e4d..00000000 --- a/utilities/process_io_lib/profile_funcs.py +++ /dev/null @@ -1,175 +0,0 @@ -""" -Library functions for the density and temperature profiles in PROCESS -Also contain residual functions for fitting - -Author: Hanni Lux (Hanni.Lux@ccfe.ac.uk) - -Compatible with PROCESS version 379 -""" - - -from scipy.special import gamma -from numpy import array, diff, pi, sin, argmin - -NUMACC = 1e-10 - - -def ncore(rhopedn, nped, nsep, nav, alphan): - - """Calculates the core density from the average density - and other profile parameters""" - - return ( - (1.0 / 3.0) - * (1.0 / rhopedn**2) - * ( - 3.0 * nav * (1.0 + alphan) - + nsep * (1.0 + alphan) * (-2.0 + rhopedn + rhopedn**2) - - nped - * ( - 1.0 - + alphan - + rhopedn - + (alphan * rhopedn) - + (alphan - 2.0) * rhopedn**2 - ) - ) - ) - - -def nprofile(rho, rhopedn, nzero, nped, nsep, alphan): - - """Calculates the density at a given normalised radius rho - depending on the profile parameters""" - - if rho < 0.0: - print("Error: rho undefined!") - elif rho <= rhopedn: - return nped + (nzero - nped) * (1.0 - (rho**2 / rhopedn**2)) ** alphan - elif rho <= 1.0: - return nsep + (nped - nsep) * (1.0 - rho) / (1.0 - rhopedn) - else: - print("Error: rho undefined!") - - -def tcore(rhopedt, tped, tsep, tav, alphat, tbeta): - - """Calculates the core temperature from the average temperature - and other profile parameters""" - - gamfac = 
( - gamma(1.0e0 + alphat + 2.0e0 / tbeta) - / gamma((2.0e0 + tbeta) / tbeta) - / rhopedt**2.0e0 - ) - - if (abs(alphat % 1) <= NUMACC) or (abs((alphat % 1) - 1) <= NUMACC): - gamfac = -gamfac / gamma(1.0e0 + alphat) - else: - gamfac = gamfac * gamma(-alphat) * sin(pi * alphat) / pi - - return ( - tped - + ( - tped * rhopedt**2.0e0 - - tav - + (1.0e0 - rhopedt) - / 3.0e0 - * ((1.0e0 + 2.0e0 * rhopedt) * tped + (2.0e0 + rhopedt) * tsep) - ) - * gamfac - ) - - -def tprofile(rho, rhopedt, tzero, tped, tsep, alphat, betat): - - """Calculates the temperature at a given normalised radius rho - depending on the profile parameters""" - - if rho < 0.0: - print("Error: rho undefined!") - elif rho <= rhopedt: - return ( - tped + (tzero - tped) * (1.0 - (rho**betat / rhopedt**betat)) ** alphat - ) - elif rho <= 1.0: - return tsep + (tped - tsep) * (1.0 - rho) / (1.0 - rhopedt) - else: - print("Error: rho undefined!") - - -def nresidual(params, xdata, ydata, dictfitrad): - - """Calculates the residual between a given data set and a given - density profile""" - - alphan, rhoped = params - - nzero = dictfitrad["n0"] - indped = argmin(abs(xdata - rhoped)) - dictfitrad["nped"] = ydata[indped] - nped = dictfitrad["nped"] - nsep = dictfitrad["nsep"] - - ymodel = [nprofile(x, rhoped, nzero, nped, nsep, alphan) for x in xdata] - - return array(ymodel) - ydata - - -def tresidual(params, xdata, ydata, dictfitrad): - - """Calculates the residual between a given data set and a given - temperature profile""" - - alphat, betat, rhoped = params - - tzero = dictfitrad["t0"] - indped = argmin(abs(xdata - rhoped)) - dictfitrad["tped"] = ydata[indped] - tped = dictfitrad["tped"] - tsep = dictfitrad["tsep"] - - ymodel = [tprofile(x, rhoped, tzero, tped, tsep, alphat, betat) for x in xdata] - - return array(ymodel) - ydata - - -def int_f_rho_drho(rhoarr, parr): - - """integrates f * rho drho using a leftsided rectangular sum""" - - ptot = 0.0 - - drhoarr = diff(rhoarr) - - for rho, p, drho in 
zip(rhoarr[:-1], parr[:-1], drhoarr): - - ptot += p * rho * drho - - return ptot - - -if __name__ == "__main__": - - RHOPEDN = 0.85 - NPED = 1e2 - ALPHAN = 2.0 - NAV = 1.4e2 - NSEP = 0.42 - NCORE = ncore(RHOPEDN, NPED, NSEP, NAV, ALPHAN) - - from pylab import figure, xlabel, ylabel, axvline, axhline, plot, show - from numpy import linspace - - RHO = linspace(0, 1, 50) - - figure() - xlabel(r"$\rho$") - ylabel(r"$n$") - axvline(RHOPEDN, label=r"$\rho_{ped}$") - axhline(NCORE, ls="--") - axhline(NPED, ls=":") - axhline(NSEP) - plot(RHO, [nprofile(x, RHOPEDN, NCORE, NPED, NSEP, ALPHAN) for x in RHO]) - - show() diff --git a/utilities/processgui/README b/utilities/processgui/README deleted file mode 100644 index ae1c3af4..00000000 --- a/utilities/processgui/README +++ /dev/null @@ -1,70 +0,0 @@ -PROCESS GUI README -Tom Miller 09/14 - ---Setup-- -Currently the only way to access the GUI is by running the -development server locally. - -The is a script 'start_gui' that should automate this. Just -type './start_gui' and the GUI should start. There will be -a pause the first time while the server is started. - -*NOTE* If you are working on a Linux desktop machine and already have Firefox - open, trying 'start_gui' on a Fusion Unix Network machine will try to open a - PROCESS GUI session on the already-open Firefox running locally. This will - fail! You need to either close the local Firefox session completely, or open - manually a different browser (e.g. konqueror) and navigate to 127.0.0.1:8000 - in the address bar. - -To launch the server manually: -You need to add the django library to your PYTHONPATH. If you are working -on the Fusion Unix Network, you can do this by running the line -export PYTHONPATH=$PYTHONPATH:'/home/PROCESS/django/Django-1.7/build/lib/' - -then start the server by typing -python manage.py runserver -in the top level directory of the project. 
- -The GUI should now be visible if you open a web browser (on the same -machine only) and navigate to 127.0.0.1:8000 - ---Using GUI-- -Currently you can upload two files. One file that is available for -editing, and a reference file that can be used to compare variables -between one file and another. Differences should be highlighted in -red. Both files start at default values when you first open the page. - -Each variable is listed with its current value, followed by the -reference value and a short description. A longer description -should appear if you hover over the short description. - -The Constraint Equations and Iteration Variables sections have -checkboxes that can be used to add and remove equations and -variables from icc and ixc. neqns and nvar are calculated automatically. - -Variables listed in more than one place in the GUI should automatically -sync their values - editing the variable in one place should change -the value in every place the variable appears. - -The two buttons 'Expand all' and 'Close all' open and close every module -heading. This can be useful if you want to search for a variable name -with Ctrl-f - - ---IN.DAT format-- -The 'Save' button in the GUI should offer you the choice to open the file -with a text editor or save the file. Your web browser might save -the file in a 'Downloads' folder without prompting you. This can be changed -in your web browser settings. - -The IN.DAT is produced with a similar layout to the GUI, with every variable -listed in alphabetical order under module headings. - -Comments should appear next to each variable. In the -case of an integer switch, the program will try and work out the meaning of -the value selected. This will be placed after an asterix in the comment eg. 
-idensl = 7 * Switch for density limit to enforce * greenwald limit - -If you want to convert an old IN.DAT file to this format without manually -opening and saving the file in the GUI, running the newindat.py file in lib/ -will do this. Use as ./newindat.py OLD_IN.DAT NEW_IN.DAT diff --git a/utilities/processgui/db.sqlite3 b/utilities/processgui/db.sqlite3 deleted file mode 100644 index 04310e4b..00000000 Binary files a/utilities/processgui/db.sqlite3 and /dev/null differ diff --git a/utilities/processgui/dicts/make_gui_dicts.py b/utilities/processgui/dicts/make_gui_dicts.py deleted file mode 100755 index c414816e..00000000 --- a/utilities/processgui/dicts/make_gui_dicts.py +++ /dev/null @@ -1,252 +0,0 @@ -#!/usr/bin/env python3 -"""This script creates several dictionaries and lists for use by the GUI. - - GUI_MODULE --> OrderedDict that sets the ordering of the main body of the UI. - Keys are module names, values are lists of - (variable_type, variable_name, shortdescription, description) - tuples - - variable_type can be one of: - "float", "int", "array", "string" - - shortdescription is the text displayed next to the variable - - description is the text displayed when the shortdescription - if hovered over - - description and shortdescription are obtained from - DICT_DESCRIPTION in process_dicts, but with removal of - characters such as " that could cause problems - - GUI_LABLXC --> OrderedDict that sets iteration variable ordering. Keys - are ixc no., values are - (name, shortdesc, desc) tuples where 'name' is name - of variable - - GUI_LABLCC --> OrderedDict that sets constraint equation ordering. Keys - are icc no., values are constraint descriptions. 
- - Tom Miller 09/14 - -""" - -import re -from collections import OrderedDict -import logging -import copy -import argparse -from create_dicts import print_dict -from process_io_lib.process_dicts import ( - DICT_DEFAULT, - DICT_IXC_FULL, - DICT_DESCRIPTIONS, - DICT_MODULE, -) - - -def print_list(li, name, comment=""): - """Prints a list with format: - #comment - name = [ - li[0], - li[1], - ... - ] - - """ - if len(li) == 0: - return - print("\n#" + comment) - print(name + " = [") - for value in li: - print("\t", value, ",") - print("]") - - -def parse_labl(labl): - """Parses a 'labl' format description from DICT_DESCRIPTION - eg ( 1) * aspect - ( 2) * bt - Returns a list of (number, checked, desc) tuples where 'number' is - number in brackets, 'checked' indicates whether an asterix is present - and 'desc' is everything that follows - - """ - ret = [] - for line in labl.split("\n"): - regex = r"""\(([\d ]+)\) #capture the digits and whitespace - #in brackets at the beginning - - \s*(\*?)\s* #is there a * before the description? - (.*) - """ - ma = re.match(regex, line, re.VERBOSE) - if ma: - num = int(ma.group(1)) - if ma.group(2) == "*": - checked = True - else: - checked = False - desc = ma.group(3).strip() - if desc == "UNUSED": - continue - ret.append((num, checked, desc)) - return ret - - -def get_desc(name): - """Gets an entry from DICT_DESCRIPTIONS and returns the short style - description and the long style description with character " removed - - """ - - desc = DICT_DESCRIPTIONS[name] - # make sure no " characters are passed to html - if '"' in desc: - logging.warning('Removing " characters from ' + name + " description") - desc = desc.replace('"', "") - - shortdesc = desc.split("\n")[0] - return shortdesc, desc - - -def main(): - """Main program routine. 
Creates and prints the dictionaries""" - temp_dict_module = copy.deepcopy(DICT_MODULE) - dict_module = {} - - for modulename, value in temp_dict_module.items(): - if modulename.endswith(" Variables"): - modulename = modulename[:-10] - dict_module[modulename] = value - - # get rid of inertial confinement and reverse field pinch settings - del dict_module["Ife"] - del dict_module["Rfp"] - - # get rid of ixc, icc, boundl, boundu - # these are dealt with seperately - dict_module["Numerics"].remove("ixc") - dict_module["Numerics"].remove("icc") - dict_module["Numerics"].remove("neqns") - dict_module["Numerics"].remove("nvar") - dict_module["Numerics"].remove("boundl") - dict_module["Numerics"].remove("boundu") - - # get rid of runtitle - # dict_module["Global"].remove("runtitle") - - gui_module = OrderedDict() - # make gui_module - for module, varlist in dict_module.items(): - tuplist = [] - for varname in varlist: - desctup = get_desc(varname) - if isinstance(DICT_DEFAULT[varname], list): - tuplist.append(("array", varname) + desctup) - elif isinstance(DICT_DEFAULT[varname], str): - tuplist.append(("string", varname) + desctup) - elif isinstance(DICT_DEFAULT[varname], float): - tuplist.append(("float", varname) + desctup) - elif isinstance(DICT_DEFAULT[varname], int): - tuplist.append(("int", varname) + desctup) - else: - continue - - gui_module[module] = tuplist - - # set of all variable names in main body, not printed - gui_module_var_set = set() - for varlist in dict_module.values(): - gui_module_var_set.update(varlist) - - gui_lablxc = OrderedDict() - # make the LABLXC dict - for num, dummy, dummy in parse_labl(DICT_DESCRIPTIONS["lablxc"]): - name = DICT_IXC_FULL[str(num)]["name"] - # check that the name is included in the description given in lablxc - assert name in dummy - - if "obsolete" in dummy.lower(): - logging.warning( - " Iteration variable " - + name - + " is marked obsolete in lablxc so is being excluded" - ) - continue - - # tftort should be marked 
obsolete - if name == "tftort": - continue - - if name not in gui_module_var_set: - logging.warning( - " Iteration variable " - + name - + " does not appear in main body of gui_module so is being excluded" - ) - continue - - # The description given in DICT_DESCRIPTION is better than in lablxc - shortdesc, desc = get_desc(name) - # descriptions like (iteration variable 2) are redundant when in - # the GUI - shortdesc = shortdesc.split(" (iter")[0] - desctup = (shortdesc, desc) - gui_lablxc[num] = (name,) + desctup - - gui_lablcc = OrderedDict() - # Create GUI_LABLCC - for num, dummy, desc in parse_labl(DICT_DESCRIPTIONS["lablcc"]): - desc = desc.capitalize().replace('"', "") - if "Unused" in desc: - logging.warning( - " Constraint " - + str(num) - + " is marked unused " - + "so is being excluded" - ) - continue - gui_lablcc[num] = desc - - # Print the header - header = """ -\"\"\" -This file contains dictionaries for use by the PROCESS GUI. -GUI_MODULE : Sets the ordering of main body variable -GUI_LABLXC : Sets the ordering of iteration variables -GUI_LABLCC : Sets the ordering of constraint equations - -\"\"\" - -from collections import OrderedDict - """ - print(header) - - # print everything - print("\n#List that sets order of main body variables") - print("GUI_MODULE = OrderedDict()") - for modname, varlist in sorted(gui_module.items()): - print("GUI_MODULE['" + modname + "'] = [") - for tup in varlist: - print("\t", tup, ",") - print("\t]") - - comment = "List that sets ixc ordering" - print_dict(gui_lablxc, "GUI_LABLXC", comment, dict_type="OrderedDict()") - - comment = "List that sets icc ordering" - print_dict(gui_lablcc, "GUI_LABLCC", comment, dict_type="OrderedDict()") - - -if __name__ == "__main__": - - desc = ( - "Produces dictionaries for use by the GUI. Prints to stdout. 
" - + "Redirect to file using '>'" - ) - PARSER = argparse.ArgumentParser(description=desc) - - PARSER.parse_args() - - main() diff --git a/utilities/processgui/lib/guiindat.py b/utilities/processgui/lib/guiindat.py deleted file mode 100755 index e9b87a02..00000000 --- a/utilities/processgui/lib/guiindat.py +++ /dev/null @@ -1,476 +0,0 @@ -#!/usr/bin/env python3 - -"""Contains a class that is used to read and write IN.DAT files. - files. Can be used to convert an old IN.DAT file to dictionary - or vice versa. - When this file is run as a program, converts IN.DAT file to new - format. - - Tom Miller 09/14 - -""" - -from process_io_lib.process_dicts import ( - DICT_DEFAULT, - DICT_MODULE, - DICT_DESCRIPTIONS, - DICTIONARY_VERSION, - DICT_IXC_SIMPLE, -) -import copy -import argparse - -# variables in input file but not in dictionaries. -OMISSIONS = list() - -# ioptimz values -ioptimz_des = { - "-1": "for no optimisation HYBRD only", - "0": "for HYBRD and VMCON (not recommended)", - "1": "for optimisation VMCON only", -} - - -class BColours: - HEADER = "\033[95m" - OKBLUE = "\033[94m" - OKGREEN = "\033[92m" - WARNING = "\033[93m" - FAIL = "\033[91m" - ENDC = "\033[0m" - BOLD = "\033[1m" - UNDERLINE = "\033[4m" - - -def is_valid(varname): - """Tests to see if a varname is a valid variable to have in the IN.DAT - by seeing if it appears in DICT_DEFAULT - - """ - if varname not in DICT_DEFAULT: - error_msg = ( - BColours.FAIL + "Unrecognised input variable: {0}. " - "Variable will be skipped".format(varname) + BColours.ENDC - ) - print(error_msg) - OMISSIONS.append(varname) - # raise ValueError(error_msg) - return True - - -def get_type(varname): - """Gets the type of a varname. Undestands arrays ie. 
ixc(2) is int, - dcond(3) is float - - """ - if "(" in varname: - array_name = varname.split("(")[0] - is_valid(array_name) - return type(DICT_DEFAULT[array_name][0]) - else: - is_valid(varname) - return type(DICT_DEFAULT[varname]) - - -def mimic_type(in_val, tar): - """Converts in_val to have the same type as tar""" - if isinstance(in_val, str): - in_val = in_val.strip(", ") - if isinstance(tar, list): - # allow trailing commas for lists - in_val = str(in_val) - in_val = in_val.strip("[]") - ret = [] - for i in in_val.split(","): - # cast every variable in the list - ret.append(mimic_type(i, tar[0])) - return ret - elif isinstance(tar, int): - return int(in_val) - elif isinstance(tar, float): - return float(str(in_val).lower().replace("d", "e")) - elif isinstance(tar, str): - # remove all quote marks and spaces - return str(in_val).strip("\"' ") - else: - raise ValueError("Unknown type for variable passed to mimic_type") - - -def cast(varname, val): - """Casts a string input from user 'val' to a python variable of - the same type as DICT_DEFAULT[varname] - - """ - try: - if "(" in varname: - array_name = varname.split("(")[0] - return mimic_type(val, DICT_DEFAULT[array_name][0]) - else: - if varname not in OMISSIONS: - return mimic_type(val, DICT_DEFAULT[varname]) - except ValueError: - error_msg = ( - BColours.FAIL + "Could not cast {0} to type {1} for" - "variable {2}. Please check DICT_DEFAULT for parameter type.".format( - str(val), str(type(DICT_DEFAULT[varname])), varname - ) - + BColours.ENDC - ) - print(error_msg) - # raise ValueError("Could not cast " + str(val) + " to type " + - # str(type(DICT_DEFAULT[varname])) + " for variable "+ - # varname) - - -def interpret_array_var(st): - """Interprets a fortran style array access eg boundl(4). 
Returns - the array name and python style index - eg boundl, 3 - """ - st = st.split(")")[0] - array_name, array_st_index = st.split("(") - is_valid(array_name) - return array_name, int(array_st_index) - 1 - - -def parse_int_line(line, char): - """Helper function for get_comment. Splits the line by char, - attempts to convert the first chunk to an int, returns the number - and rest of the line if succesful, None otherwise - """ - assert len(char) == 1 - li = line.split(char) - try: - num = int(li[0]) - except ValueError: - return None - return num, char.join(li[1:]).strip() - - -def get_line_by_int(num, desc): - """Looks through the line in desc for a line that looks like it's - describing the value of the variable. See if the line starts with - = #, (#), # where # represents an integer. If this integer = num or - -num, return the rest of the line. - """ - for line in desc.split("\n"): - line = line.strip() - # look for '= #' type lines - if line[0] == "=": - line = line[1:].strip() - t = parse_int_line(line, " ") - if t and (t[0] == num or t[0] == -num): - return t[1].strip("* ") - # look for (#) style lines - if line[0] == "(": - line = line[1:].strip() - t = parse_int_line(line, ")") - if t and (t[0] == num or t[0] == -num): - return t[1].strip("* ") - # look for lines that start with an int - if line[0].isdigit(): - t = parse_int_line(line, " ") - if t and (t[0] == num or t[0] == -num): - return t[1].strip("* ") - return "" - - -def get_comment(val, desc): - """Gets the description that should be printed in the IN.DAT for - #a variable. For non-integer variables, this is the first line - #of GUI_DESCRIPTION[var]. 
For integer switches, try to work out - #which switch is selected - """ - - if desc.strip() == "": - return "" - # commas, full stops, colons must not appear in comments - desc = desc.replace(",", ";") - desc = desc.replace(".", ";") - desc = desc.replace(":", "*") - firstline = desc.split("\n")[0].strip("* ") - if not isinstance(val, int): - # for non ints, just use the first line of the desc - return "* " + firstline - - # for ints, get rid of parenthesis - ret = firstline.split("(")[0].strip("* ") - - # then try and describe the value taken - int_desc = get_line_by_int(val, desc) - if int_desc: - return "* " + ret + " * " + int_desc - else: - return "* " + ret - - -def make_mod_header(st): - """Makes the header line for a module""" - return "*" + st.center(50, "-") + "*" - - -def make_line(var, val, comment=True): - """Returns a var = val * comment line - if comment = False, don't try and find a comment - - """ - assignstr = (str(var) + " = " + str(val)).ljust(15) - if comment and var in DICT_DESCRIPTIONS: - if var == "ioptimz": - desc = ioptimz_des[str(val)] - else: - desc = get_comment(val, DICT_DESCRIPTIONS[var]) - else: - desc = "" - - return assignstr + " " + desc - - -def strip_line(line): - """Strips of irrelevant parts of a line such as comments, old-style - module headings - - """ - line = line.split("*")[0] - line = line.strip() - line = line.rstrip("\r") - line = line.rstrip(",") - if line == "" or line[0] == "$": - return "" - return line - - -class GuiInDat(object): - """Class that handles reading IN.DATs and writing them in a readable - format - """ - - def __init__(self, filename=""): - self.__data__ = {} - self.Run_Description = "" - if filename: - self.read_file(filename) - - def __getitem__(self, key): - key = key.lower() - if "(" in key: - # work out which array and which index is being accessed - array_name, array_index = interpret_array_var(key) - is_valid(array_name) - if array_name not in self.__data__: - # if it's a valid array but not in 
the dictionary, copy the - # dictionary value - self.__data__[array_name] = copy.copy(DICT_DEFAULT[array_name]) - return self.__data__[array_name][array_index] - else: - is_valid(key) - if key not in self.__data__: - self.__data__[key] = copy.copy(DICT_DEFAULT[key]) - return self.__data__[key] - - def __setitem__(self, key, val): - key = key.lower() - if "(" in key: - array_name, array_index = interpret_array_var(key) - is_valid(array_name) - # cast the value to the correct type - val = cast(key, val) - if array_name not in self.__data__: - self.__data__[array_name] = copy.copy(DICT_DEFAULT[array_name]) - self.__data__[array_name][array_index] = val - else: - is_valid(key) - val = cast(key, val) - self.__data__[key] = val - - def __delitem__(self, key): - key = key.lower() - if key in self.__data__: - del self.__data__[key] - - def is_diff(self, key): - """Returns true if a variable has a value in this IN.DAT - different to the default - - """ - if key in self.__data__: - if self.__data__[key] != DICT_DEFAULT[key]: - return True - return False - - def clip(self): - """Chops elements beyond nvar and neqns off the end of ixc - and icc - """ - self["ixc"] = self["ixc"][: self["nvar"]] - self["icc"] = self["icc"][: self["neqns"]] - - def __str__(self): - """Convets self to text format""" - LB = "\n" # line break character - ret = "" - - # print the descritption - for line in self.Run_Description.split("\n"): - ret += "*****" + line.rstrip() + LB - ret += LB + make_mod_header("-") + LB + LB - - # constraints - ret += make_mod_header("Constraint Equations") + LB - neqns = self["neqns"] - ret += make_line("neqns", neqns, False) + LB - - # always print in sorted order - icc_temp = sorted(self["icc"][:neqns]) - for num in range(self["neqns"]): - const_num = icc_temp[num] - ret += make_line("icc(" + str(num + 1) + ")", const_num, False) - ret += " * " - # get the description of the constraint from lablcc - ret += get_line_by_int(const_num, DICT_DESCRIPTIONS["lablcc"]) - ret 
+= LB - ret += LB - - # itervars - ret += make_mod_header("Iteration Variables") + LB - nvar = self["nvar"] - ret += make_line("nvar", nvar, False) + LB - # always print in sorted order - ixc_temp = sorted(self["ixc"][:nvar]) - for num in range(self["nvar"]): - iter_num = ixc_temp[num] - ubname = "boundu(" + str(iter_num) + ")" - lbname = "boundl(" + str(iter_num) + ")" - ret += make_line("ixc(" + str(num + 1) + ")", iter_num, False) - ret += " * " - - # write a description - varname = DICT_IXC_SIMPLE[str(iter_num)] - ret += varname + " " - ret += get_comment(0.1, DICT_DESCRIPTIONS[varname]) - ret += LB - - # print the lower and upper bounds below - ret += make_line(lbname, self[lbname], False) + LB - ret += make_line(ubname, self[ubname], False) + LB - ret += LB - - # don't print these, they've already been printed - neverprint = {"neqns", "nvar", "ixc", "icc", "boundl", "boundu"} - # these are important, always print them - alwaysprint = {"ioptimz", "minmax"} - - # if isweep isn't 0, always print sweep information - if self["isweep"] != 0: - alwaysprint.add("nsweep") - alwaysprint.add("sweep") - - # print the main body - for module, arr in DICT_MODULE.items(): - ret += make_mod_header(module) + LB - - for varname in arr: - # don't print these, they have already been printed - if varname in neverprint: - continue - # if the value is different or is an important variable - if self.is_diff(varname) or varname in alwaysprint: - val = self[varname] - if isinstance(val, list): - # don't print the python style [ ] array characters - ret += make_line(varname, str(val)[1:-1]) + LB - else: - ret += make_line(varname, val) + LB - - ret += LB + LB - - return ret - - def add_dict(self, di): - """Given a dictionary of values, adds them to the IN.DAT. 
Values can be - given in string format and will be casted automatically - - """ - for key, value in di.items(): - self[key] = value - self.clip() - - def readlines(self, inputlines): - """Iterates through a list of lines and adds values to the dictionary - or the header - - """ - lines = [] - currentline = "" - # first we need to make a pass and merge together run-on lines - # eg IXC = 1, 2, 3, 4 - # 5, 6, 7 - # becomes IXC = 1,2,3,4,5 - for line in inputlines: - if line == "": - continue - if line[:5] == "*****": - line = line.rstrip() - self.Run_Description += line[5:] + "\n" - continue - line = strip_line(line) - if line == "": - continue - # if a new variable assignment starts on this line - if "=" in line: - # save the accumulated line - lines.append(currentline) - # start a new line - currentline = line - else: - # otherwise concat this line onto the last with a comma - # seperator if needed - if currentline: - currentline += "," + line - else: - currentline = line - - lines.append(currentline) - if lines[0] and "=" not in lines[0]: - raise ValueError( - "First line '" + lines[0] + "'" + "does not contain '=' sign" - ) - - self.Run_Description = self.Run_Description.strip() - - # now make the assignments - for line in lines: - if line == "": - continue - varname, varval = line.split("=")[:2] - self[varname.strip()] = varval.strip() - - # cut off the end of ixc and icc if neqns and nvar are set low enough - self.clip() - - def read_file(self, filename): - """Opens a file and reads it in""" - self.readlines(open(filename).readlines()) - - def write_file(self, filename): - """Writes to a given filename""" - file_handle = open(filename, "w") - file_handle.write(str(self)) - - -# if called as main function, convert an IN.DAT -if __name__ == "__main__": - PROGDESC = ( - "Reads an IN.DAT file and prints an IN.DAT file of the same " - + "format as used by the GUI. 
Redirect to output file using '>'" - ) - - PARSER = argparse.ArgumentParser(description=PROGDESC) - PARSER.add_argument("inputfile", help="IN.DAT file to read from") - PARSER.add_argument("outputfile", help="IN.DAT file to read from") - ARGS = PARSER.parse_args() - - print("Using dictionary for PROCESS v." + str(DICTIONARY_VERSION)) - in_dat_obj = GuiInDat(ARGS.inputfile) - file_handle = open(ARGS.outputfile, "w") - file_handle.write(str(in_dat_obj)) - file_handle.close() diff --git a/utilities/processgui/lib/mylib.py b/utilities/processgui/lib/mylib.py deleted file mode 100755 index e888ff46..00000000 --- a/utilities/processgui/lib/mylib.py +++ /dev/null @@ -1,79 +0,0 @@ -"""Useful functions for the GUI - -""" - -from dicts.gui_dicts import GUI_MODULE, GUI_LABLXC, GUI_LABLCC - -from lib.guiindat import GuiInDat - - -def dict_to_in_dat(di): - """Converts a dictionary recieved from the client to a - GuiInDat object. The ixc and icc arrays are created - from the itervar_#, constraint_# style checkboxes - - """ - in_dat = GuiInDat() - for dummy, tuplist in GUI_MODULE.items(): - # all of the usual variables that appear in GUI_MODULE - # can just be assigned - for tu in tuplist: - varname = tu[1] - in_dat[varname] = di[varname] - - # do iteration variables - ixc = [] - for num in GUI_LABLXC.keys(): - itername = "itervar_" + str(num) - boundlname = "boundl(" + str(num) + ")" - bounduname = "boundu(" + str(num) + ")" - # add to ixc if variable is set as itervar - if itername in di and di[itername] == "on": - ixc.append(num) - # copy the bounds - in_dat[boundlname] = di[boundlname] - in_dat[bounduname] = di[bounduname] - - in_dat["ixc"] = ixc - in_dat["nvar"] = len(ixc) - - # do constraint equations - icc = [] - # f = open('test_text.txt', 'w') - # for k in di: - # f.write('\n' + k) - # f.write('\n\nNext is GUI_LABCC keys') - for num in GUI_LABLCC.keys(): - # f.write('\n' + str(num)) - constname = "constraint_" + str(num) - if constname in di and di[constname] == "on": - 
icc.append(num) - # f.write('\n' + constname) - # f.close() - - in_dat["icc"] = icc - in_dat["neqns"] = len(icc) - - # do description - if "Run_Description" in di: - in_dat.Run_Description = di["Run_Description"] - - return in_dat - - -def split_dicts(request): - """Converts a submission from the client to two dictionaries. - Reference values start with 'ref_'. These are split into - their own dictionary so they can be dealt with seperately - - """ - assert request.method == "POST" - in_dict = {} - ref_dict = {} - for key, value in request.POST.items(): - if "ref_" in key: - ref_dict[key[4:]] = value - else: - in_dict[key] = value - - return in_dict, ref_dict diff --git a/utilities/processgui/manage.py b/utilities/processgui/manage.py deleted file mode 100755 index 5afe3930..00000000 --- a/utilities/processgui/manage.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env python -import os -import sys - -if __name__ == "__main__": - os.environ.setdefault("DJANGO_SETTINGS_MODULE", "processgui.settings") - - from django.core.management import execute_from_command_line - - execute_from_command_line(sys.argv) diff --git a/utilities/processgui/processgui/settings.py b/utilities/processgui/processgui/settings.py deleted file mode 100644 index 753c38b6..00000000 --- a/utilities/processgui/processgui/settings.py +++ /dev/null @@ -1,91 +0,0 @@ -""" -Django settings for ui project. - -For more information on this file, see -https://docs.djangoproject.com/en/1.7/topics/settings/ - -For the full list of settings and their values, see -https://docs.djangoproject.com/en/1.7/ref/settings/ -""" - -# Build paths inside the project like this: os.path.join(BASE_DIR, ...) -import os - -BASE_DIR = os.path.dirname(os.path.dirname(__file__)) - - -# Quick-start development settings - unsuitable for production -# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ - -# SECURITY WARNING: keep the secret key used in production secret! 
-SECRET_KEY = "h@(&(2ox-4r=9s-$u@spg6@&wi*0b$q3hd3b2pjx^46y+l2w8o" - -# SECURITY WARNING: don't run with debug turned on in production! -DEBUG = True - -TEMPLATE_DEBUG = True - -ALLOWED_HOSTS = [] - - -# Application definition - -INSTALLED_APPS = ( - "django.contrib.admin", - "django.contrib.auth", - "django.contrib.contenttypes", - "django.contrib.sessions", - "django.contrib.messages", - "django.contrib.staticfiles", - "ui", -) - -MIDDLEWARE_CLASSES = ( - "django.contrib.sessions.middleware.SessionMiddleware", - "django.middleware.common.CommonMiddleware", - "django.middleware.csrf.CsrfViewMiddleware", - "django.contrib.auth.middleware.AuthenticationMiddleware", - "django.contrib.auth.middleware.SessionAuthenticationMiddleware", - "django.contrib.messages.middleware.MessageMiddleware", - "django.middleware.clickjacking.XFrameOptionsMiddleware", -) - -ROOT_URLCONF = "processgui.urls" - -WSGI_APPLICATION = "processgui.wsgi.application" - - -# Database -# https://docs.djangoproject.com/en/1.7/ref/settings/#databases - -DATABASES = { - "default": { - "ENGINE": "django.db.backends.sqlite3", - "NAME": os.path.join(BASE_DIR, "db.sqlite3"), - } -} - -# Internationalization -# https://docs.djangoproject.com/en/1.7/topics/i18n/ - -LANGUAGE_CODE = "en-us" - -TIME_ZONE = "UTC" - -USE_I18N = True - -USE_L10N = True - -USE_TZ = True - - -# Static files (CSS, JavaScript, Images) -# https://docs.djangoproject.com/en/1.7/howto/static-files/ - -STATIC_PATH = os.path.join(BASE_DIR, "static") - -STATICFILES_DIRS = (STATIC_PATH,) - -STATIC_URL = "/static/" - -TEMPLATE_DIRS = (os.path.join(BASE_DIR, "ui/templates/"),) diff --git a/utilities/processgui/processgui/urls.py b/utilities/processgui/processgui/urls.py deleted file mode 100644 index 309c8a86..00000000 --- a/utilities/processgui/processgui/urls.py +++ /dev/null @@ -1,12 +0,0 @@ -from django.conf.urls import patterns, include, url -from django.contrib.staticfiles.urls import staticfiles_urlpatterns -from django.contrib import 
admin - -urlpatterns = ( - patterns( - "", - url(r"^$", "ui.views.index"), - url(r"^admin/", include(admin.site.urls)), - ) - + staticfiles_urlpatterns() -) diff --git a/utilities/processgui/processgui/views.py b/utilities/processgui/processgui/views.py deleted file mode 100644 index e69de29b..00000000 diff --git a/utilities/processgui/processgui/wsgi.py b/utilities/processgui/processgui/wsgi.py deleted file mode 100644 index 09343074..00000000 --- a/utilities/processgui/processgui/wsgi.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -WSGI config for ui project. - -It exposes the WSGI callable as a module-level variable named ``application``. - -For more information on this file, see -https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/ -""" - -import os -from django.core.wsgi import get_wsgi_application - -os.environ.setdefault("DJANGO_SETTINGS_MODULE", "processgui.settings") - - -application = get_wsgi_application() diff --git a/utilities/processgui/start_gui b/utilities/processgui/start_gui deleted file mode 100755 index a1e5fd4f..00000000 --- a/utilities/processgui/start_gui +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -#if django not in path -if [[ $PYTHONPATH != *django* ]]; then - export PYTHONPATH=$PYTHONPATH':/home/PROCESS/django/Django-1.7/build/lib/' -fi - -#need to run manage.py from its containing directory -dirloc=`dirname $0` -cd $dirloc - -#if the user doesn't already have the server running -if [[ `ps u` != *"manage.py runserver"* ]]; then - echo "Starting PROCESS GUI server" - python manage.py runserver # &> /dev/null & - sleep 5 #probably long enough -fi - -firefox 127.0.0.1:8000 &> /dev/null & diff --git a/utilities/processgui/static/external/jquery/jquery.js b/utilities/processgui/static/external/jquery/jquery.js deleted file mode 100644 index c5c64825..00000000 --- a/utilities/processgui/static/external/jquery/jquery.js +++ /dev/null @@ -1,9789 +0,0 @@ -/*! 
- * jQuery JavaScript Library v1.10.2 - * http://jquery.com/ - * - * Includes Sizzle.js - * http://sizzlejs.com/ - * - * Copyright 2005, 2013 jQuery Foundation, Inc. and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2013-07-03T13:48Z - */ -(function( window, undefined ) { - -// Can't do this because several apps including ASP.NET trace -// the stack via arguments.caller.callee and Firefox dies if -// you try to trace through "use strict" call chains. (#13335) -// Support: Firefox 18+ -//"use strict"; -var - // The deferred used on DOM ready - readyList, - - // A central reference to the root jQuery(document) - rootjQuery, - - // Support: IE<10 - // For `typeof xmlNode.method` instead of `xmlNode.method !== undefined` - core_strundefined = typeof undefined, - - // Use the correct document accordingly with window argument (sandbox) - location = window.location, - document = window.document, - docElem = document.documentElement, - - // Map over jQuery in case of overwrite - _jQuery = window.jQuery, - - // Map over the $ in case of overwrite - _$ = window.$, - - // [[Class]] -> type pairs - class2type = {}, - - // List of deleted data cache ids, so we can reuse them - core_deletedIds = [], - - core_version = "1.10.2", - - // Save a reference to some core methods - core_concat = core_deletedIds.concat, - core_push = core_deletedIds.push, - core_slice = core_deletedIds.slice, - core_indexOf = core_deletedIds.indexOf, - core_toString = class2type.toString, - core_hasOwn = class2type.hasOwnProperty, - core_trim = core_version.trim, - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - // The jQuery object is actually just the init constructor 'enhanced' - return new jQuery.fn.init( selector, context, rootjQuery ); - }, - - // Used for matching numbers - core_pnum = /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source, - - // Used for splitting on whitespace - core_rnotwhite = /\S+/g, - - // Make sure 
we trim BOM and NBSP (here's looking at you, Safari 5.0 and IE) - rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, - - // A simple way to check for HTML strings - // Prioritize #id over to avoid XSS via location.hash (#9521) - // Strict HTML recognition (#11290: must start with <) - rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/, - - // Match a standalone tag - rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>|)$/, - - // JSON RegExp - rvalidchars = /^[\],:{}\s]*$/, - rvalidbraces = /(?:^|:|,)(?:\s*\[)+/g, - rvalidescape = /\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g, - rvalidtokens = /"[^"\\\r\n]*"|true|false|null|-?(?:\d+\.|)\d+(?:[eE][+-]?\d+|)/g, - - // Matches dashed string for camelizing - rmsPrefix = /^-ms-/, - rdashAlpha = /-([\da-z])/gi, - - // Used by jQuery.camelCase as callback to replace() - fcamelCase = function( all, letter ) { - return letter.toUpperCase(); - }, - - // The ready event handler - completed = function( event ) { - - // readyState === "complete" is good enough for us to call the dom ready in oldIE - if ( document.addEventListener || event.type === "load" || document.readyState === "complete" ) { - detach(); - jQuery.ready(); - } - }, - // Clean-up method for dom ready events - detach = function() { - if ( document.addEventListener ) { - document.removeEventListener( "DOMContentLoaded", completed, false ); - window.removeEventListener( "load", completed, false ); - - } else { - document.detachEvent( "onreadystatechange", completed ); - window.detachEvent( "onload", completed ); - } - }; - -jQuery.fn = jQuery.prototype = { - // The current version of jQuery being used - jquery: core_version, - - constructor: jQuery, - init: function( selector, context, rootjQuery ) { - var match, elem; - - // HANDLE: $(""), $(null), $(undefined), $(false) - if ( !selector ) { - return this; - } - - // Handle HTML strings - if ( typeof selector === "string" ) { - if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { - 
// Assume that strings that start and end with <> are HTML and skip the regex check - match = [ null, selector, null ]; - - } else { - match = rquickExpr.exec( selector ); - } - - // Match html or make sure no context is specified for #id - if ( match && (match[1] || !context) ) { - - // HANDLE: $(html) -> $(array) - if ( match[1] ) { - context = context instanceof jQuery ? context[0] : context; - - // scripts is true for back-compat - jQuery.merge( this, jQuery.parseHTML( - match[1], - context && context.nodeType ? context.ownerDocument || context : document, - true - ) ); - - // HANDLE: $(html, props) - if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) { - for ( match in context ) { - // Properties of context are called as methods if possible - if ( jQuery.isFunction( this[ match ] ) ) { - this[ match ]( context[ match ] ); - - // ...and otherwise set as attributes - } else { - this.attr( match, context[ match ] ); - } - } - } - - return this; - - // HANDLE: $(#id) - } else { - elem = document.getElementById( match[2] ); - - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE and Opera return items - // by name instead of ID - if ( elem.id !== match[2] ) { - return rootjQuery.find( selector ); - } - - // Otherwise, we inject the element directly into the jQuery object - this.length = 1; - this[0] = elem; - } - - this.context = document; - this.selector = selector; - return this; - } - - // HANDLE: $(expr, $(...)) - } else if ( !context || context.jquery ) { - return ( context || rootjQuery ).find( selector ); - - // HANDLE: $(expr, context) - // (which is just equivalent to: $(context).find(expr) - } else { - return this.constructor( context ).find( selector ); - } - - // HANDLE: $(DOMElement) - } else if ( selector.nodeType ) { - this.context = this[0] = selector; - this.length = 1; - return this; - - // HANDLE: 
$(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) { - return rootjQuery.ready( selector ); - } - - if ( selector.selector !== undefined ) { - this.selector = selector.selector; - this.context = selector.context; - } - - return jQuery.makeArray( selector, this ); - }, - - // Start with an empty selector - selector: "", - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return core_slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - return num == null ? - - // Return a 'clean' array - this.toArray() : - - // Return just the object - ( num < 0 ? this[ this.length + num ] : this[ num ] ); - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - ret.context = this.context; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - // (You can seed the arguments with an array of args, but this is - // only used internally.) - each: function( callback, args ) { - return jQuery.each( this, callback, args ); - }, - - ready: function( fn ) { - // Add the callback - jQuery.ready.promise().done( fn ); - - return this; - }, - - slice: function() { - return this.pushStack( core_slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? 
[ this[j] ] : [] ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map(this, function( elem, i ) { - return callback.call( elem, i, elem ); - })); - }, - - end: function() { - return this.prevObject || this.constructor(null); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. - push: core_push, - sort: [].sort, - splice: [].splice -}; - -// Give the init function the jQuery prototype for later instantiation -jQuery.fn.init.prototype = jQuery.fn; - -jQuery.extend = jQuery.fn.extend = function() { - var src, copyIsArray, copy, name, options, clone, - target = arguments[0] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - target = arguments[1] || {}; - // skip the boolean and the target - i = 2; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !jQuery.isFunction(target) ) { - target = {}; - } - - // extend jQuery itself if only one argument is passed - if ( length === i ) { - target = this; - --i; - } - - for ( ; i < length; i++ ) { - // Only deal with non-null/undefined values - if ( (options = arguments[ i ]) != null ) { - // Extend the base object - for ( name in options ) { - src = target[ name ]; - copy = options[ name ]; - - // Prevent never-ending loop - if ( target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { - if ( copyIsArray ) { - copyIsArray = false; - clone = src && jQuery.isArray(src) ? src : []; - - } else { - clone = src && jQuery.isPlainObject(src) ? 
src : {}; - } - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend({ - // Unique for each copy of jQuery on the page - // Non-digits removed to match rinlinejQuery - expando: "jQuery" + ( core_version + Math.random() ).replace( /\D/g, "" ), - - noConflict: function( deep ) { - if ( window.$ === jQuery ) { - window.$ = _$; - } - - if ( deep && window.jQuery === jQuery ) { - window.jQuery = _jQuery; - } - - return jQuery; - }, - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Hold (or release) the ready event - holdReady: function( hold ) { - if ( hold ) { - jQuery.readyWait++; - } else { - jQuery.ready( true ); - } - }, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). - if ( !document.body ) { - return setTimeout( jQuery.ready ); - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - - // Trigger any bound ready events - if ( jQuery.fn.trigger ) { - jQuery( document ).trigger("ready").off("ready"); - } - }, - - // See test/unit/core.js for details concerning isFunction. - // Since version 1.3, DOM methods and functions like alert - // aren't supported. They return false on IE (#2968). 
- isFunction: function( obj ) { - return jQuery.type(obj) === "function"; - }, - - isArray: Array.isArray || function( obj ) { - return jQuery.type(obj) === "array"; - }, - - isWindow: function( obj ) { - /* jshint eqeqeq: false */ - return obj != null && obj == obj.window; - }, - - isNumeric: function( obj ) { - return !isNaN( parseFloat(obj) ) && isFinite( obj ); - }, - - type: function( obj ) { - if ( obj == null ) { - return String( obj ); - } - return typeof obj === "object" || typeof obj === "function" ? - class2type[ core_toString.call(obj) ] || "object" : - typeof obj; - }, - - isPlainObject: function( obj ) { - var key; - - // Must be an Object. - // Because of IE, we also have to check the presence of the constructor property. - // Make sure that DOM nodes and window objects don't pass through, as well - if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { - return false; - } - - try { - // Not own constructor property must be Object - if ( obj.constructor && - !core_hasOwn.call(obj, "constructor") && - !core_hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { - return false; - } - } catch ( e ) { - // IE8,9 Will throw exceptions on certain host objects #9897 - return false; - } - - // Support: IE<9 - // Handle iteration over inherited properties before own properties. - if ( jQuery.support.ownLast ) { - for ( key in obj ) { - return core_hasOwn.call( obj, key ); - } - } - - // Own properties are enumerated firstly, so to speed up, - // if last one is own, then all properties are own. 
- for ( key in obj ) {} - - return key === undefined || core_hasOwn.call( obj, key ); - }, - - isEmptyObject: function( obj ) { - var name; - for ( name in obj ) { - return false; - } - return true; - }, - - error: function( msg ) { - throw new Error( msg ); - }, - - // data: string of html - // context (optional): If specified, the fragment will be created in this context, defaults to document - // keepScripts (optional): If true, will include scripts passed in the html string - parseHTML: function( data, context, keepScripts ) { - if ( !data || typeof data !== "string" ) { - return null; - } - if ( typeof context === "boolean" ) { - keepScripts = context; - context = false; - } - context = context || document; - - var parsed = rsingleTag.exec( data ), - scripts = !keepScripts && []; - - // Single tag - if ( parsed ) { - return [ context.createElement( parsed[1] ) ]; - } - - parsed = jQuery.buildFragment( [ data ], context, scripts ); - if ( scripts ) { - jQuery( scripts ).remove(); - } - return jQuery.merge( [], parsed.childNodes ); - }, - - parseJSON: function( data ) { - // Attempt to parse using the native JSON parser first - if ( window.JSON && window.JSON.parse ) { - return window.JSON.parse( data ); - } - - if ( data === null ) { - return data; - } - - if ( typeof data === "string" ) { - - // Make sure leading/trailing whitespace is removed (IE can't handle it) - data = jQuery.trim( data ); - - if ( data ) { - // Make sure the incoming data is actual JSON - // Logic borrowed from http://json.org/json2.js - if ( rvalidchars.test( data.replace( rvalidescape, "@" ) - .replace( rvalidtokens, "]" ) - .replace( rvalidbraces, "")) ) { - - return ( new Function( "return " + data ) )(); - } - } - } - - jQuery.error( "Invalid JSON: " + data ); - }, - - // Cross-browser xml parsing - parseXML: function( data ) { - var xml, tmp; - if ( !data || typeof data !== "string" ) { - return null; - } - try { - if ( window.DOMParser ) { // Standard - tmp = new DOMParser(); - xml 
= tmp.parseFromString( data , "text/xml" ); - } else { // IE - xml = new ActiveXObject( "Microsoft.XMLDOM" ); - xml.async = "false"; - xml.loadXML( data ); - } - } catch( e ) { - xml = undefined; - } - if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; - }, - - noop: function() {}, - - // Evaluates a script in a global context - // Workarounds based on findings by Jim Driscoll - // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context - globalEval: function( data ) { - if ( data && jQuery.trim( data ) ) { - // We use execScript on Internet Explorer - // We use an anonymous function so that context is window - // rather than jQuery in Firefox - ( window.execScript || function( data ) { - window[ "eval" ].call( window, data ); - } )( data ); - } - }, - - // Convert dashed to camelCase; used by the css and data modules - // Microsoft forgot to hump their vendor prefix (#9572) - camelCase: function( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); - }, - - nodeName: function( elem, name ) { - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - }, - - // args is for internal usage only - each: function( obj, callback, args ) { - var value, - i = 0, - length = obj.length, - isArray = isArraylike( obj ); - - if ( args ) { - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback.apply( obj[ i ], args ); - - if ( value === false ) { - break; - } - } - } else { - for ( i in obj ) { - value = callback.apply( obj[ i ], args ); - - if ( value === false ) { - break; - } - } - } - - // A special, fast, case for the most common use of each - } else { - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback.call( obj[ i ], i, obj[ i ] ); - - if ( value === false ) { - break; - } - } - } else { - for ( i in obj ) { - value = callback.call( obj[ i ], i, obj[ i ] ); 
- - if ( value === false ) { - break; - } - } - } - } - - return obj; - }, - - // Use native String.trim function wherever possible - trim: core_trim && !core_trim.call("\uFEFF\xA0") ? - function( text ) { - return text == null ? - "" : - core_trim.call( text ); - } : - - // Otherwise use our own trimming functionality - function( text ) { - return text == null ? - "" : - ( text + "" ).replace( rtrim, "" ); - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArraylike( Object(arr) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - core_push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - var len; - - if ( arr ) { - if ( core_indexOf ) { - return core_indexOf.call( arr, elem, i ); - } - - len = arr.length; - i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0; - - for ( ; i < len; i++ ) { - // Skip accessing in sparse arrays - if ( i in arr && arr[ i ] === elem ) { - return i; - } - } - } - - return -1; - }, - - merge: function( first, second ) { - var l = second.length, - i = first.length, - j = 0; - - if ( typeof l === "number" ) { - for ( ; j < l; j++ ) { - first[ i++ ] = second[ j ]; - } - } else { - while ( second[j] !== undefined ) { - first[ i++ ] = second[ j++ ]; - } - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, inv ) { - var retVal, - ret = [], - i = 0, - length = elems.length; - inv = !!inv; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - retVal = !!callback( elems[ i ], i ); - if ( inv !== retVal ) { - ret.push( elems[ i ] ); - } - } - - return ret; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var value, - i = 0, - length = elems.length, - isArray = isArraylike( elems ), - ret = []; - - // Go through the array, translating each of the 
items to their - if ( isArray ) { - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret[ ret.length ] = value; - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret[ ret.length ] = value; - } - } - } - - // Flatten any nested arrays - return core_concat.apply( [], ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // Bind a function to a context, optionally partially applying any - // arguments. - proxy: function( fn, context ) { - var args, proxy, tmp; - - if ( typeof context === "string" ) { - tmp = fn[ context ]; - context = fn; - fn = tmp; - } - - // Quick check to determine if target is callable, in the spec - // this throws a TypeError, but we will just return undefined. - if ( !jQuery.isFunction( fn ) ) { - return undefined; - } - - // Simulated bind - args = core_slice.call( arguments, 2 ); - proxy = function() { - return fn.apply( context || this, args.concat( core_slice.call( arguments ) ) ); - }; - - // Set the guid of unique handler to the same of original handler, so it can be removed - proxy.guid = fn.guid = fn.guid || jQuery.guid++; - - return proxy; - }, - - // Multifunctional method to get and set values of a collection - // The value/s can optionally be executed if it's a function - access: function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - length = elems.length, - bulk = key == null; - - // Sets many values - if ( jQuery.type( key ) === "object" ) { - chainable = true; - for ( i in key ) { - jQuery.access( elems, fn, i, key[i], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !jQuery.isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function 
values - } else { - bulk = fn; - fn = function( elem, key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < length; i++ ) { - fn( elems[i], key, raw ? value : value.call( elems[i], i, fn( elems[i], key ) ) ); - } - } - } - - return chainable ? - elems : - - // Gets - bulk ? - fn.call( elems ) : - length ? fn( elems[0], key ) : emptyGet; - }, - - now: function() { - return ( new Date() ).getTime(); - }, - - // A method for quickly swapping in/out CSS properties to get correct calculations. - // Note: this method belongs to the css module but it's needed here for the support module. - // If support gets modularized, this method should be moved back to the css module. - swap: function( elem, options, callback, args ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.apply( elem, args || [] ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; - } -}); - -jQuery.ready.promise = function( obj ) { - if ( !readyList ) { - - readyList = jQuery.Deferred(); - - // Catch cases where $(document).ready() is called after the browser event has already occurred. 
- // we once tried to use readyState "interactive" here, but it caused issues like the one - // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15 - if ( document.readyState === "complete" ) { - // Handle it asynchronously to allow scripts the opportunity to delay ready - setTimeout( jQuery.ready ); - - // Standards-based browsers support DOMContentLoaded - } else if ( document.addEventListener ) { - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed, false ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed, false ); - - // If IE event model is used - } else { - // Ensure firing before onload, maybe late but safe also for iframes - document.attachEvent( "onreadystatechange", completed ); - - // A fallback to window.onload, that will always work - window.attachEvent( "onload", completed ); - - // If IE and not a frame - // continually check to see if the document is ready - var top = false; - - try { - top = window.frameElement == null && document.documentElement; - } catch(e) {} - - if ( top && top.doScroll ) { - (function doScrollCheck() { - if ( !jQuery.isReady ) { - - try { - // Use the trick by Diego Perini - // http://javascript.nwbox.com/IEContentLoaded/ - top.doScroll("left"); - } catch(e) { - return setTimeout( doScrollCheck, 50 ); - } - - // detach all dom ready events - detach(); - - // and execute any waiting functions - jQuery.ready(); - } - })(); - } - } - } - return readyList.promise( obj ); -}; - -// Populate the class2type map -jQuery.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function(i, name) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -}); - -function isArraylike( obj ) { - var length = obj.length, - type = jQuery.type( obj ); - - if ( jQuery.isWindow( obj ) ) { - return false; - } - - if ( obj.nodeType === 1 && length ) { - return true; - } - - return type === "array" || 
type !== "function" && - ( length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj ); -} - -// All jQuery objects should point back to these -rootjQuery = jQuery(document); -/*! - * Sizzle CSS Selector Engine v1.10.2 - * http://sizzlejs.com/ - * - * Copyright 2013 jQuery Foundation, Inc. and other contributors - * Released under the MIT license - * http://jquery.org/license - * - * Date: 2013-07-03 - */ -(function( window, undefined ) { - -var i, - support, - cachedruns, - Expr, - getText, - isXML, - compile, - outermostContext, - sortInput, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + -(new Date()), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - hasDuplicate = false, - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - return 0; - } - return 0; - }, - - // General-purpose constants - strundefined = typeof undefined, - MAX_NEGATIVE = 1 << 31, - - // Instance methods - hasOwn = ({}).hasOwnProperty, - arr = [], - pop = arr.pop, - push_native = arr.push, - push = arr.push, - slice = arr.slice, - // Use a stripped-down indexOf if we can't use a native one - indexOf = arr.indexOf || function( elem ) { - var i = 0, - len = this.length; - for ( ; i < len; i++ ) { - if ( this[i] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - // http://www.w3.org/TR/css3-syntax/#characters - characterEncoding = "(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+", - - // Loosely modeled on CSS identifier characters - // An 
unquoted value should be a CSS identifier http://www.w3.org/TR/css3-selectors/#attribute-selectors - // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier - identifier = characterEncoding.replace( "w", "w#" ), - - // Acceptable operators http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + characterEncoding + ")" + whitespace + - "*(?:([*^$|!~]?=)" + whitespace + "*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|(" + identifier + ")|)|)" + whitespace + "*\\]", - - // Prefer arguments quoted, - // then not containing pseudos/brackets, - // then attribute selectors/non-parenthetical expressions, - // then anything else - // These preferences are here to reduce the number of selectors - // needing tokenize in the PSEUDO preFilter - pseudos = ":(" + characterEncoding + ")(?:\\(((['\"])((?:\\\\.|[^\\\\])*?)\\3|((?:\\\\.|[^\\\\()[\\]]|" + attributes.replace( 3, 8 ) + ")*)|.*)\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), - - rsibling = new RegExp( whitespace + "*[+~]" ), - rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*)" + whitespace + "*\\]", "g" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + characterEncoding + ")" ), - "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ), - "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + - "*(even|odd|(([+-]|)(\\d*)n|)" + 
whitespace + "*(?:([+-]|)" + whitespace + - "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + - whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rescape = /'|\\/g, - - // CSS escapes http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), - funescape = function( _, escaped, escapedWhitespace ) { - var high = "0x" + escaped - 0x10000; - // NaN means non-codepoint - // Support: Firefox - // Workaround erroneous numeric interpretation of +"0x" - return high !== high || escapedWhitespace ? - escaped : - // BMP codepoint - high < 0 ? - String.fromCharCode( high + 0x10000 ) : - // Supplemental Plane codepoint (surrogate pair) - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }; - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - (arr = slice.call( preferredDoc.childNodes )), - preferredDoc.childNodes - ); - // Support: Android<4.0 - // Detect silently failing push.apply - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? 
- - // Leverage slice if possible - function( target, els ) { - push_native.apply( target, slice.call(els) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - // Can't trust NodeList.length - while ( (target[j++] = els[i++]) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var match, elem, m, nodeType, - // QSA vars - i, groups, old, nid, newContext, newSelector; - - if ( ( context ? context.ownerDocument || context : preferredDoc ) !== document ) { - setDocument( context ); - } - - context = context || document; - results = results || []; - - if ( !selector || typeof selector !== "string" ) { - return results; - } - - if ( (nodeType = context.nodeType) !== 1 && nodeType !== 9 ) { - return []; - } - - if ( documentIsHTML && !seed ) { - - // Shortcuts - if ( (match = rquickExpr.exec( selector )) ) { - // Speed-up: Sizzle("#ID") - if ( (m = match[1]) ) { - if ( nodeType === 9 ) { - elem = context.getElementById( m ); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - if ( elem && elem.parentNode ) { - // Handle the case where IE, Opera, and Webkit return items - // by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - } else { - // Context is not a document - if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && - contains( context, elem ) && elem.id === m ) { - results.push( elem ); - return results; - } - } - - // Speed-up: Sizzle("TAG") - } else if ( match[2] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Speed-up: Sizzle(".CLASS") - } else if ( (m = match[3]) && support.getElementsByClassName && context.getElementsByClassName ) { - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // QSA 
path - if ( support.qsa && (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { - nid = old = expando; - newContext = context; - newSelector = nodeType === 9 && selector; - - // qSA works strangely on Element-rooted queries - // We can work around this by specifying an extra ID on the root - // and working up from there (Thanks to Andrew Dupont for the technique) - // IE 8 doesn't work on object elements - if ( nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { - groups = tokenize( selector ); - - if ( (old = context.getAttribute("id")) ) { - nid = old.replace( rescape, "\\$&" ); - } else { - context.setAttribute( "id", nid ); - } - nid = "[id='" + nid + "'] "; - - i = groups.length; - while ( i-- ) { - groups[i] = nid + toSelector( groups[i] ); - } - newContext = rsibling.test( selector ) && context.parentNode || context; - newSelector = groups.join(","); - } - - if ( newSelector ) { - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch(qsaError) { - } finally { - if ( !old ) { - context.removeAttribute("id"); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {Function(string, Object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key += " " ) > Expr.cacheLength ) { - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return (cache[ key ] = value); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; 
- return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created div and expects a boolean result - */ -function assert( fn ) { - var div = document.createElement("div"); - - try { - return !!fn( div ); - } catch (e) { - return false; - } finally { - // Remove from its parent by default - if ( div.parentNode ) { - div.parentNode.removeChild( div ); - } - // release memory in IE - div = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split("|"), - i = attrs.length; - - while ( i-- ) { - Expr.attrHandle[ arr[i] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - ( ~b.sourceIndex || MAX_NEGATIVE ) - - ( ~a.sourceIndex || MAX_NEGATIVE ); - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( (cur = cur.nextSibling) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return (name === "input" || name === "button") && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction(function( argument ) { - argument = +argument; - return markFunction(function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ (j = matchIndexes[i]) ] ) { - seed[j] = !(matches[j] = seed[j]); - } - } - }); - }); -} - -/** - * Detect xml - * @param {Element|Object} elem An element or a document - */ -isXML = Sizzle.isXML = function( elem ) { - // documentElement is verified for cases where it doesn't yet exist - // (such as loading iframes in IE - #4833) - var documentElement = elem && (elem.ownerDocument || elem).documentElement; - return documentElement ? documentElement.nodeName !== "HTML" : false; -}; - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var doc = node ? 
node.ownerDocument || node : preferredDoc, - parent = doc.defaultView; - - // If no document and documentElement is available, return - if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Set our document - document = doc; - docElem = doc.documentElement; - - // Support tests - documentIsHTML = !isXML( doc ); - - // Support: IE>8 - // If iframe document is assigned to "document" variable and if iframe has been reloaded, - // IE will throw "permission denied" error when accessing "document" variable, see jQuery #13936 - // IE6-8 do not support the defaultView property so parent will be undefined - if ( parent && parent.attachEvent && parent !== parent.top ) { - parent.attachEvent( "onbeforeunload", function() { - setDocument(); - }); - } - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties (excepting IE8 booleans) - support.attributes = assert(function( div ) { - div.className = "i"; - return !div.getAttribute("className"); - }); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert(function( div ) { - div.appendChild( doc.createComment("") ); - return !div.getElementsByTagName("*").length; - }); - - // Check if getElementsByClassName can be trusted - support.getElementsByClassName = assert(function( div ) { - div.innerHTML = "
"; - - // Support: Safari<4 - // Catch class over-caching - div.firstChild.className = "i"; - // Support: Opera<10 - // Catch gEBCN failure to find non-leading classes - return div.getElementsByClassName("i").length === 2; - }); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert(function( div ) { - docElem.appendChild( div ).id = expando; - return !doc.getElementsByName || !doc.getElementsByName( expando ).length; - }); - - // ID find and filter - if ( support.getById ) { - Expr.find["ID"] = function( id, context ) { - if ( typeof context.getElementById !== strundefined && documentIsHTML ) { - var m = context.getElementById( id ); - // Check parentNode to catch when Blackberry 4.6 returns - // nodes that are no longer in the document #6963 - return m && m.parentNode ? [m] : []; - } - }; - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute("id") === attrId; - }; - }; - } else { - // Support: IE6/7 - // getElementById is not reliable as a find shortcut - delete Expr.find["ID"]; - - Expr.filter["ID"] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); - return node && node.value === attrId; - }; - }; - } - - // Tag - Expr.find["TAG"] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== strundefined ) { - return context.getElementsByTagName( tag ); - } - } : - function( tag, context ) { - var elem, - tmp = [], - i = 0, - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( (elem = results[i++]) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== strundefined && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See http://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( (support.qsa = rnative.test( doc.querySelectorAll )) ) { - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert(function( div ) { - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // http://bugs.jquery.com/ticket/12359 - div.innerHTML = ""; - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !div.querySelectorAll("[selected]").length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Webkit/Opera - :checked should return selected option elements - // 
http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !div.querySelectorAll(":checked").length ) { - rbuggyQSA.push(":checked"); - } - }); - - assert(function( div ) { - - // Support: Opera 10-12/IE8 - // ^= $= *= and empty values - // Should not select anything - // Support: Windows 8 Native Apps - // The type attribute is restricted during .innerHTML assignment - var input = doc.createElement("input"); - input.setAttribute( "type", "hidden" ); - div.appendChild( input ).setAttribute( "t", "" ); - - if ( div.querySelectorAll("[t^='']").length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( !div.querySelectorAll(":enabled").length ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Opera 10-11 does not throw on post-comma invalid pseudos - div.querySelectorAll("*,:x"); - rbuggyQSA.push(",.*:"); - }); - } - - if ( (support.matchesSelector = rnative.test( (matches = docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector) )) ) { - - assert(function( div ) { - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( div, "div" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( div, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - }); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); - - /* Contains - ---------------------------------------------------------------------- */ - - // Element contains another - // Purposefully does not implement inclusive descendent - // As in, an element does not contain itself - 
contains = rnative.test( docElem.contains ) || docElem.compareDocumentPosition ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? - adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - )); - } : - function( a, b ) { - if ( b ) { - while ( (b = b.parentNode) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = docElem.compareDocumentPosition ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var compare = b.compareDocumentPosition && a.compareDocumentPosition && a.compareDocumentPosition( b ); - - if ( compare ) { - // Disconnected nodes - if ( compare & 1 || - (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { - - // Choose the first element that is related to our preferred document - if ( a === doc || contains(preferredDoc, a) ) { - return -1; - } - if ( b === doc || contains(preferredDoc, b) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; - } - - // Not directly comparable, sort on existence of method - return a.compareDocumentPosition ? -1 : 1; - } : - function( a, b ) { - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - - // Parentless nodes are either documents or disconnected - } else if ( !aup || !bup ) { - return a === doc ? -1 : - b === doc ? 1 : - aup ? -1 : - bup ? 1 : - sortInput ? 
- ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( (cur = cur.parentNode) ) { - ap.unshift( cur ); - } - cur = b; - while ( (cur = cur.parentNode) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[i] === bp[i] ) { - i++; - } - - return i ? - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[i], bp[i] ) : - - // Otherwise nodes in our document sort first - ap[i] === preferredDoc ? -1 : - bp[i] === preferredDoc ? 1 : - 0; - }; - - return doc; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - // Set document vars if needed - if ( ( elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - // Make sure that attribute selectors are quoted - expr = expr.replace( rattributeQuotes, "='$1']" ); - - if ( support.matchesSelector && documentIsHTML && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch(e) {} - } - - return Sizzle( expr, document, null, [elem] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - // Set document vars if needed - if ( ( context.ownerDocument || context ) !== document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - // Set document vars if needed - if ( ( 
elem.ownerDocument || elem ) !== document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val === undefined ? - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - (val = elem.getAttributeNode(name)) && val.specified ? - val.value : - null : - val; -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( (elem = results[i++]) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - // If no nodeType, this is expected to be an array - for ( ; (node = elem[i]); i++ ) { - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - // Use textContent for elements - // innerText usage removed for consistency of new lines (see #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( 
elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[1] = match[1].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[3] = ( match[4] || match[5] || "" ).replace( runescape, funescape ); - - if ( match[2] === "~=" ) { - match[3] = " " + match[3] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[1] = match[1].toLowerCase(); - - if ( match[1].slice( 0, 3 ) === "nth" ) { - // nth-* requires argument - if ( !match[3] ) { - Sizzle.error( match[0] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); - match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); - - // other types prohibit arguments - } else if ( match[3] ) { - Sizzle.error( match[0] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[5] && match[2]; - - if ( matchExpr["CHILD"].test( match[0] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[3] && match[4] !== undefined ) { - match[2] = match[4]; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - // Get excess from tokenize (recursively) - (excess = tokenize( unquoted, true )) && - // advance to the next closing parenthesis - (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { - - // excess is a negative index - match[0] = match[0].slice( 0, excess ); - match[2] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? 
- function() { return true; } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && - classCache( className, function( elem ) { - return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== strundefined && elem.getAttribute("class") || "" ); - }); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - }; - }, - - "CHILD": function( type, what, argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, context, xml ) { - var cache, outerCache, node, diff, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( (node = node[ dir ]) ) { - if ( ofType ? 
node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { - return false; - } - } - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - // Seek `elem` from a previously-cached index - outerCache = parent[ expando ] || (parent[ expando ] = {}); - cache = outerCache[ type ] || []; - nodeIndex = cache[0] === dirruns && cache[1]; - diff = cache[0] === dirruns && cache[2]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( (node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - (diff = nodeIndex = 0) || start.pop()) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - outerCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - // Use previously-cached element index if available - } else if ( useCache && (cache = (elem[ expando ] || (elem[ expando ] = {}))[ type ]) && cache[0] === dirruns ) { - diff = cache[1]; - - // xml :nth-child(...) or :nth-last-child(...) or :nth(-last)?-of-type(...) - } else { - // Use the same loop as above to seek `elem` from the start - while ( (node = ++nodeIndex && node && node[ dir ] || - (diff = nodeIndex = 0) || start.pop()) ) { - - if ( ( ofType ? 
node.nodeName.toLowerCase() === name : node.nodeType === 1 ) && ++diff ) { - // Cache the index of each encountered element - if ( useCache ) { - (node[ expando ] || (node[ expando ] = {}))[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? - markFunction(function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf.call( seed, matched[i] ); - seed[ idx ] = !( matches[ idx ] = matched[i] ); - } - }) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - // Potentially complex pseudos - "not": markFunction(function( selector ) { - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? 
- markFunction(function( seed, matches, context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( (elem = unmatched[i]) ) { - seed[i] = !(matches[i] = elem); - } - } - }) : - function( elem, context, xml ) { - input[0] = elem; - matcher( input, null, xml, results ); - return !results.pop(); - }; - }), - - "has": markFunction(function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - }), - - "contains": markFunction(function( text ) { - return function( elem ) { - return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; - }; - }), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - // lang value must be a valid identifier - if ( !ridentifier.test(lang || "") ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( (elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); - return false; - }; - }), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); - }, - - // Boolean properties - "enabled": function( elem ) { - return elem.disabled === false; - }, - - "disabled": function( elem ) { - return elem.disabled === true; - }, - - "checked": function( elem ) { - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); - }, - - "selected": function( elem ) { - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is only affected by element nodes and content nodes(including text(3), cdata(4)), - // not comment, processing instructions, or others - // Thanks to Diego Perini for the nodeName shortcut - // Greater than "@" means alpha characters (specifically not starting with "#" or "?") - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeName > "@" || elem.nodeType === 3 || elem.nodeType === 4 ) { - return false; - } - } - return true; - }, - - "parent": function( elem 
) { - return !Expr.pseudos["empty"]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc) - // use getAttribute instead to test this case - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === elem.type ); - }, - - // Position-in-collection - "first": createPositionalPseudo(function() { - return [ 0 ]; - }), - - "last": createPositionalPseudo(function( matchIndexes, length ) { - return [ length - 1 ]; - }), - - "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - }), - - "even": createPositionalPseudo(function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "odd": createPositionalPseudo(function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }), - - "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { - var i = argument < 0 ? 
argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - }) - } -}; - -Expr.pseudos["nth"] = Expr.pseudos["eq"]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -function tokenize( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || (match = rcomma.exec( soFar )) ) { - if ( match ) { - // Don't consume trailing commas as valid - soFar = soFar.slice( match[0].length ) || soFar; - } - groups.push( tokens = [] ); - } - - matched = false; - - // Combinators - if ( (match = rcombinators.exec( soFar )) ) { - matched = match.shift(); - tokens.push({ - value: matched, - // Cast descendant combinators to space - type: match[0].replace( rtrim, " " ) - }); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || - (match = preFilters[ type ]( match ))) ) { - matched = match.shift(); - tokens.push({ - value: matched, - type: type, - matches: match - }); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? 
- Sizzle.error( selector ) : - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -} - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[i].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - checkNonElements = base && dir === "parentNode", - doneName = done++; - - return combinator.first ? - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var data, cache, outerCache, - dirkey = dirruns + " " + doneName; - - // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching - if ( xml ) { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( (elem = elem[ dir ]) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || (elem[ expando ] = {}); - if ( (cache = outerCache[ dir ]) && cache[0] === dirkey ) { - if ( (data = cache[1]) === true || data === cachedruns ) { - return data === true; - } - } else { - cache = outerCache[ dir ] = [ dirkey ]; - cache[1] = matcher( elem, context, xml ) || cachedruns; - if ( cache[1] === true ) { - return true; - } - } - } - } - } - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? 
- function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[i]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[0]; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( (elem = unmatched[i]) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction(function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( (elem = temp[i]) ) { - matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) ) { - // Restore matcherIn since elem is not yet a final match - temp.push( (matcherIn[i] = elem) ); - } - } - postFinder( null, (matcherOut = []), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( (elem = matcherOut[i]) && - (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { - - seed[temp] = !(results[temp] = elem); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - }); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[0].type ], - implicitRelative = leadingRelative || Expr.relative[" "], - i = leadingRelative ? 
1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf.call( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - (checkContext = context).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - } ]; - - for ( ; i < len; i++ ) { - if ( (matcher = Expr.relative[ tokens[i].type ]) ) { - matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; - } else { - matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[j].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
"*" : "" }) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - // A counter to specify which element is currently being matched - var matcherCachedRuns = 0, - bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, expandContext ) { - var elem, j, matcher, - setMatched = [], - matchedCount = 0, - i = "0", - unmatched = seed && [], - outermost = expandContext != null, - contextBackup = outermostContext, - // We must always have either seed elements or context - elems = seed || byElement && Expr.find["TAG"]( "*", expandContext && context.parentNode || context ), - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = (dirruns += contextBackup == null ? 
1 : Math.random() || 0.1); - - if ( outermost ) { - outermostContext = context !== document && context; - cachedruns = matcherCachedRuns; - } - - // Add elements passing elementMatchers directly to results - // Keep `i` a string if there are no elements so `matchedCount` will be "00" below - for ( ; (elem = elems[i]) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - while ( (matcher = elementMatchers[j++]) ) { - if ( matcher( elem, context, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - cachedruns = ++matcherCachedRuns; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - // They will have gone through all possible matchers - if ( (elem = !matcher && elem) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // Apply set filters to unmatched elements - matchedCount += i; - if ( bySet && i !== matchedCount ) { - j = 0; - while ( (matcher = setMatchers[j++]) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !(unmatched[i] || setMatched[i]) ) { - setMatched[i] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? 
- markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, group /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - // Generate a function of recursive functions that can be used to check each element - if ( !group ) { - group = tokenize( selector ); - } - i = group.length; - while ( i-- ) { - cached = matcherFromTokens( group[i] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); - } - return cached; -}; - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[i], results ); - } - return results; -} - -function select( selector, context, results, seed ) { - var i, tokens, token, type, find, - match = tokenize( selector ); - - if ( !seed ) { - // Try to minimize operations if there is only one group - if ( match.length === 1 ) { - - // Take a shortcut and set the context if the root selector is an ID - tokens = match[0] = match[0].slice( 0 ); - if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && - support.getById && context.nodeType === 9 && documentIsHTML && - Expr.relative[ tokens[1].type ] ) { - - context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; - if ( !context ) { - return results; - } - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length; - while ( i-- ) { - token = tokens[i]; - - // Abort if we hit a combinator - if ( Expr.relative[ (type = token.type) ] ) { - break; - } - if ( (find = Expr.find[ type ]) ) { - // Search, expanding context for leading sibling combinators - if ( (seed = find( - token.matches[0].replace( runescape, funescape ), - rsibling.test( tokens[0].type ) && context.parentNode || context - )) ) { - - // If seed is empty or no tokens remain, we can return early - tokens.splice( i, 1 ); - selector = seed.length && toSelector( tokens ); - if ( !selector ) { - push.apply( results, seed ); - return results; - } - - break; - } - } - } - } - } - - // Compile and execute a filtering function - // Provide `match` to avoid retokenization if we modified the selector above - compile( selector, match )( - seed, - context, - !documentIsHTML, - results, - rsibling.test( selector ) - ); - return results; -} - -// One-time assignments - -// Sort stability -support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; - -// Support: Chrome<14 -// Always assume duplicates if they aren't passed to the comparison function -support.detectDuplicates = hasDuplicate; - -// Initialize against the default document -setDocument(); - -// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) -// Detached nodes confoundingly follow *each other* -support.sortDetached = assert(function( div1 ) { - // Should return 1, but returns 4 (following) - return div1.compareDocumentPosition( document.createElement("div") ) & 1; -}); - -// Support: IE<8 -// Prevent attribute/property "interpolation" -// http://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !assert(function( div ) { - div.innerHTML = ""; - return div.firstChild.getAttribute("href") === "#" ; -}) ) { - addHandle( "type|href|height|width", function( elem, name, isXML ) { - if ( !isXML ) { - return elem.getAttribute( name, name.toLowerCase() === "type" ? 
1 : 2 ); - } - }); -} - -// Support: IE<9 -// Use defaultValue in place of getAttribute("value") -if ( !support.attributes || !assert(function( div ) { - div.innerHTML = ""; - div.firstChild.setAttribute( "value", "" ); - return div.firstChild.getAttribute( "value" ) === ""; -}) ) { - addHandle( "value", function( elem, name, isXML ) { - if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { - return elem.defaultValue; - } - }); -} - -// Support: IE<9 -// Use getAttributeNode to fetch booleans when getAttribute lies -if ( !assert(function( div ) { - return div.getAttribute("disabled") == null; -}) ) { - addHandle( booleans, function( elem, name, isXML ) { - var val; - if ( !isXML ) { - return (val = elem.getAttributeNode( name )) && val.specified ? - val.value : - elem[ name ] === true ? name.toLowerCase() : null; - } - }); -} - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; -jQuery.expr[":"] = jQuery.expr.pseudos; -jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; - - -})( window ); -// String to Object options format cache -var optionsCache = {}; - -// Convert String-formatted options into Object-formatted ones and store in cache -function createOptions( options ) { - var object = optionsCache[ options ] = {}; - jQuery.each( options.match( core_rnotwhite ) || [], function( _, flag ) { - object[ flag ] = true; - }); - return object; -} - -/* - * Create a callback list using the following parameters: - * - * options: an optional list of space-separated options that will change how - * the callback list behaves or a more traditional option object - * - * By default a callback list will act like an event callback list and can be - * "fired" multiple times. 
- * - * Possible options: - * - * once: will ensure the callback list can only be fired once (like a Deferred) - * - * memory: will keep track of previous values and will call any callback added - * after the list has been fired right away with the latest "memorized" - * values (like a Deferred) - * - * unique: will ensure a callback can only be added once (no duplicate in the list) - * - * stopOnFalse: interrupt callings when a callback returns false - * - */ -jQuery.Callbacks = function( options ) { - - // Convert options from String-formatted to Object-formatted if needed - // (we check in cache first) - options = typeof options === "string" ? - ( optionsCache[ options ] || createOptions( options ) ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - // Last fire value (for non-forgettable lists) - memory, - // Flag to know if list was already fired - fired, - // End of the loop when firing - firingLength, - // Index of currently firing callback (modified by remove if needed) - firingIndex, - // First callback to fire (used internally by add and fireWith) - firingStart, - // Actual callback list - list = [], - // Stack of fire calls for repeatable lists - stack = !options.once && [], - // Fire callbacks - fire = function( data ) { - memory = options.memory && data; - fired = true; - firingIndex = firingStart || 0; - firingStart = 0; - firingLength = list.length; - firing = true; - for ( ; list && firingIndex < firingLength; firingIndex++ ) { - if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) { - memory = false; // To prevent further calls using add - break; - } - } - firing = false; - if ( list ) { - if ( stack ) { - if ( stack.length ) { - fire( stack.shift() ); - } - } else if ( memory ) { - list = []; - } else { - self.disable(); - } - } - }, - // Actual Callbacks object - self = { - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list 
) { - // First, we save the current length - var start = list.length; - (function add( args ) { - jQuery.each( args, function( _, arg ) { - var type = jQuery.type( arg ); - if ( type === "function" ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && type !== "string" ) { - // Inspect recursively - add( arg ); - } - }); - })( arguments ); - // Do we need to add the callbacks to the - // current firing batch? - if ( firing ) { - firingLength = list.length; - // With memory, if we're not firing then - // we should call right away - } else if ( memory ) { - firingStart = start; - fire( memory ); - } - } - return this; - }, - // Remove a callback from the list - remove: function() { - if ( list ) { - jQuery.each( arguments, function( _, arg ) { - var index; - while( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - // Handle firing indexes - if ( firing ) { - if ( index <= firingLength ) { - firingLength--; - } - if ( index <= firingIndex ) { - firingIndex--; - } - } - } - }); - } - return this; - }, - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? jQuery.inArray( fn, list ) > -1 : !!( list && list.length ); - }, - // Remove all callbacks from the list - empty: function() { - list = []; - firingLength = 0; - return this; - }, - // Have the list do nothing anymore - disable: function() { - list = stack = memory = undefined; - return this; - }, - // Is it disabled? - disabled: function() { - return !list; - }, - // Lock the list in its current state - lock: function() { - stack = undefined; - if ( !memory ) { - self.disable(); - } - return this; - }, - // Is it locked? 
- locked: function() { - return !stack; - }, - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( list && ( !fired || stack ) ) { - args = args || []; - args = [ context, args.slice ? args.slice() : args ]; - if ( firing ) { - stack.push( args ); - } else { - fire( args ); - } - } - return this; - }, - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; -jQuery.extend({ - - Deferred: function( func ) { - var tuples = [ - // action, add listener, listener list, final state - [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ], - [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ], - [ "notify", "progress", jQuery.Callbacks("memory") ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - then: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - return jQuery.Deferred(function( newDefer ) { - jQuery.each( tuples, function( i, tuple ) { - var action = tuple[ 0 ], - fn = jQuery.isFunction( fns[ i ] ) && fns[ i ]; - // deferred[ done | fail | progress ] for forwarding actions to newDefer - deferred[ tuple[1] ](function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && jQuery.isFunction( returned.promise ) ) { - returned.promise() - .done( newDefer.resolve ) - .fail( newDefer.reject ) - .progress( newDefer.notify ); - } else { - newDefer[ action + "With" ]( this === promise ? newDefer.promise() : this, fn ? 
[ returned ] : arguments ); - } - }); - }); - fns = null; - }).promise(); - }, - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Keep pipe for back-compat - promise.pipe = promise.then; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 3 ]; - - // promise[ done | fail | progress ] = list.add - promise[ tuple[1] ] = list.add; - - // Handle state - if ( stateString ) { - list.add(function() { - // state = [ resolved | rejected ] - state = stateString; - - // [ reject_list | resolve_list ].disable; progress_list.lock - }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock ); - } - - // deferred[ resolve | reject | notify ] - deferred[ tuple[0] ] = function() { - deferred[ tuple[0] + "With" ]( this === deferred ? promise : this, arguments ); - return this; - }; - deferred[ tuple[0] + "With" ] = list.fireWith; - }); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( subordinate /* , ..., subordinateN */ ) { - var i = 0, - resolveValues = core_slice.call( arguments ), - length = resolveValues.length, - - // the count of uncompleted subordinates - remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0, - - // the master Deferred. If resolveValues consist of only a single Deferred, just use that. - deferred = remaining === 1 ? subordinate : jQuery.Deferred(), - - // Update function for both resolve and progress values - updateFunc = function( i, contexts, values ) { - return function( value ) { - contexts[ i ] = this; - values[ i ] = arguments.length > 1 ? 
core_slice.call( arguments ) : value; - if( values === progressValues ) { - deferred.notifyWith( contexts, values ); - } else if ( !( --remaining ) ) { - deferred.resolveWith( contexts, values ); - } - }; - }, - - progressValues, progressContexts, resolveContexts; - - // add listeners to Deferred subordinates; treat others as resolved - if ( length > 1 ) { - progressValues = new Array( length ); - progressContexts = new Array( length ); - resolveContexts = new Array( length ); - for ( ; i < length; i++ ) { - if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) { - resolveValues[ i ].promise() - .done( updateFunc( i, resolveContexts, resolveValues ) ) - .fail( deferred.reject ) - .progress( updateFunc( i, progressContexts, progressValues ) ); - } else { - --remaining; - } - } - } - - // if we're not waiting on anything, resolve the master - if ( !remaining ) { - deferred.resolveWith( resolveContexts, resolveValues ); - } - - return deferred.promise(); - } -}); -jQuery.support = (function( support ) { - - var all, a, input, select, fragment, opt, eventName, isSupported, i, - div = document.createElement("div"); - - // Setup - div.setAttribute( "className", "t" ); - div.innerHTML = "
a"; - - // Finish early in limited (non-browser) environments - all = div.getElementsByTagName("*") || []; - a = div.getElementsByTagName("a")[ 0 ]; - if ( !a || !a.style || !all.length ) { - return support; - } - - // First batch of tests - select = document.createElement("select"); - opt = select.appendChild( document.createElement("option") ); - input = div.getElementsByTagName("input")[ 0 ]; - - a.style.cssText = "top:1px;float:left;opacity:.5"; - - // Test setAttribute on camelCase class. If it works, we need attrFixes when doing get/setAttribute (ie6/7) - support.getSetAttribute = div.className !== "t"; - - // IE strips leading whitespace when .innerHTML is used - support.leadingWhitespace = div.firstChild.nodeType === 3; - - // Make sure that tbody elements aren't automatically inserted - // IE will insert them into empty tables - support.tbody = !div.getElementsByTagName("tbody").length; - - // Make sure that link elements get serialized correctly by innerHTML - // This requires a wrapper element in IE - support.htmlSerialize = !!div.getElementsByTagName("link").length; - - // Get the style information from getAttribute - // (IE uses .cssText instead) - support.style = /top/.test( a.getAttribute("style") ); - - // Make sure that URLs aren't manipulated - // (IE normalizes it by default) - support.hrefNormalized = a.getAttribute("href") === "/a"; - - // Make sure that element opacity exists - // (IE uses filter instead) - // Use a regex to work around a WebKit issue. See #5145 - support.opacity = /^0.5/.test( a.style.opacity ); - - // Verify style float existence - // (IE uses styleFloat instead of cssFloat) - support.cssFloat = !!a.style.cssFloat; - - // Check the default checkbox/radio value ("" on WebKit; "on" elsewhere) - support.checkOn = !!input.value; - - // Make sure that a selected-by-default option has a working selected property. 
- // (WebKit defaults to false instead of true, IE too, if it's in an optgroup) - support.optSelected = opt.selected; - - // Tests for enctype support on a form (#6743) - support.enctype = !!document.createElement("form").enctype; - - // Makes sure cloning an html5 element does not cause problems - // Where outerHTML is undefined, this still works - support.html5Clone = document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav>"; - - // Will be defined later - support.inlineBlockNeedsLayout = false; - support.shrinkWrapBlocks = false; - support.pixelPosition = false; - support.deleteExpando = true; - support.noCloneEvent = true; - support.reliableMarginRight = true; - support.boxSizingReliable = true; - - // Make sure checked status is properly cloned - input.checked = true; - support.noCloneChecked = input.cloneNode( true ).checked; - - // Make sure that the options inside disabled selects aren't marked as disabled - // (WebKit marks them as disabled) - select.disabled = true; - support.optDisabled = !opt.disabled; - - // Support: IE<9 - try { - delete div.test; - } catch( e ) { - support.deleteExpando = false; - } - - // Check if we can trust getAttribute("value") - input = document.createElement("input"); - input.setAttribute( "value", "" ); - support.input = input.getAttribute( "value" ) === ""; - - // Check if an input maintains its value after becoming a radio - input.value = "t"; - input.setAttribute( "type", "radio" ); - support.radioValue = input.value === "t"; - - // #11217 - WebKit loses check when the name is after the checked attribute - input.setAttribute( "checked", "t" ); - input.setAttribute( "name", "t" ); - - fragment = document.createDocumentFragment(); - fragment.appendChild( input ); - - // Check if a disconnected checkbox will retain its checked - // value of true after appended to the DOM (IE6/7) - support.appendChecked = input.checked; - - // WebKit doesn't clone checked state correctly in fragments - support.checkClone = 
fragment.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE<9 - // Opera does not clone events (and typeof div.attachEvent === undefined). - // IE9-10 clones events bound via attachEvent, but they don't trigger with .click() - if ( div.attachEvent ) { - div.attachEvent( "onclick", function() { - support.noCloneEvent = false; - }); - - div.cloneNode( true ).click(); - } - - // Support: IE<9 (lack submit/change bubble), Firefox 17+ (lack focusin event) - // Beware of CSP restrictions (https://developer.mozilla.org/en/Security/CSP) - for ( i in { submit: true, change: true, focusin: true }) { - div.setAttribute( eventName = "on" + i, "t" ); - - support[ i + "Bubbles" ] = eventName in window || div.attributes[ eventName ].expando === false; - } - - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - // Support: IE<9 - // Iteration over object's inherited properties before its own. 
- for ( i in jQuery( support ) ) { - break; - } - support.ownLast = i !== "0"; - - // Run tests that need a body at doc ready - jQuery(function() { - var container, marginDiv, tds, - divReset = "padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;", - body = document.getElementsByTagName("body")[0]; - - if ( !body ) { - // Return for frameset docs that don't have a body - return; - } - - container = document.createElement("div"); - container.style.cssText = "border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px"; - - body.appendChild( container ).appendChild( div ); - - // Support: IE8 - // Check if table cells still have offsetWidth/Height when they are set - // to display:none and there are still other visible table cells in a - // table row; if so, offsetWidth/Height are not reliable for use when - // determining if an element has been hidden directly using - // display:none (it is still safe to use offsets if a parent element is - // hidden; don safety goggles and see bug #4512 for more information). - div.innerHTML = "
t
"; - tds = div.getElementsByTagName("td"); - tds[ 0 ].style.cssText = "padding:0;margin:0;border:0;display:none"; - isSupported = ( tds[ 0 ].offsetHeight === 0 ); - - tds[ 0 ].style.display = ""; - tds[ 1 ].style.display = "none"; - - // Support: IE8 - // Check if empty table cells still have offsetWidth/Height - support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 ); - - // Check box-sizing and margin behavior. - div.innerHTML = ""; - div.style.cssText = "box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;"; - - // Workaround failing boxSizing test due to offsetWidth returning wrong value - // with some non-1 values of body zoom, ticket #13543 - jQuery.swap( body, body.style.zoom != null ? { zoom: 1 } : {}, function() { - support.boxSizing = div.offsetWidth === 4; - }); - - // Use window.getComputedStyle because jsdom on node.js will break without it. - if ( window.getComputedStyle ) { - support.pixelPosition = ( window.getComputedStyle( div, null ) || {} ).top !== "1%"; - support.boxSizingReliable = ( window.getComputedStyle( div, null ) || { width: "4px" } ).width === "4px"; - - // Check if div with explicit width and no margin-right incorrectly - // gets computed margin-right based on width of container. 
(#3333) - // Fails in WebKit before Feb 2011 nightlies - // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right - marginDiv = div.appendChild( document.createElement("div") ); - marginDiv.style.cssText = div.style.cssText = divReset; - marginDiv.style.marginRight = marginDiv.style.width = "0"; - div.style.width = "1px"; - - support.reliableMarginRight = - !parseFloat( ( window.getComputedStyle( marginDiv, null ) || {} ).marginRight ); - } - - if ( typeof div.style.zoom !== core_strundefined ) { - // Support: IE<8 - // Check if natively block-level elements act like inline-block - // elements when setting their display to 'inline' and giving - // them layout - div.innerHTML = ""; - div.style.cssText = divReset + "width:1px;padding:1px;display:inline;zoom:1"; - support.inlineBlockNeedsLayout = ( div.offsetWidth === 3 ); - - // Support: IE6 - // Check if elements with layout shrink-wrap their children - div.style.display = "block"; - div.innerHTML = "
"; - div.firstChild.style.width = "5px"; - support.shrinkWrapBlocks = ( div.offsetWidth !== 3 ); - - if ( support.inlineBlockNeedsLayout ) { - // Prevent IE 6 from affecting layout for positioned elements #11048 - // Prevent IE from shrinking the body in IE 7 mode #12869 - // Support: IE<8 - body.style.zoom = 1; - } - } - - body.removeChild( container ); - - // Null elements to avoid leaks in IE - container = div = tds = marginDiv = null; - }); - - // Null elements to avoid leaks in IE - all = select = fragment = opt = a = input = null; - - return support; -})({}); - -var rbrace = /(?:\{[\s\S]*\}|\[[\s\S]*\])$/, - rmultiDash = /([A-Z])/g; - -function internalData( elem, name, data, pvt /* Internal Use Only */ ){ - if ( !jQuery.acceptData( elem ) ) { - return; - } - - var ret, thisCache, - internalKey = jQuery.expando, - - // We have to handle DOM nodes and JS objects differently because IE6-7 - // can't GC object references properly across the DOM-JS boundary - isNode = elem.nodeType, - - // Only DOM nodes need the global jQuery cache; JS object data is - // attached directly to the object so GC can occur automatically - cache = isNode ? jQuery.cache : elem, - - // Only defining an ID for JS objects if its cache already exists allows - // the code to shortcut on the same path as a DOM node with no cache - id = isNode ? 
elem[ internalKey ] : elem[ internalKey ] && internalKey; - - // Avoid doing any more work than we need to when trying to get data on an - // object that has no data at all - if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && data === undefined && typeof name === "string" ) { - return; - } - - if ( !id ) { - // Only DOM nodes need a new unique ID for each element since their data - // ends up in the global cache - if ( isNode ) { - id = elem[ internalKey ] = core_deletedIds.pop() || jQuery.guid++; - } else { - id = internalKey; - } - } - - if ( !cache[ id ] ) { - // Avoid exposing jQuery metadata on plain JS objects when the object - // is serialized using JSON.stringify - cache[ id ] = isNode ? {} : { toJSON: jQuery.noop }; - } - - // An object can be passed to jQuery.data instead of a key/value pair; this gets - // shallow copied over onto the existing cache - if ( typeof name === "object" || typeof name === "function" ) { - if ( pvt ) { - cache[ id ] = jQuery.extend( cache[ id ], name ); - } else { - cache[ id ].data = jQuery.extend( cache[ id ].data, name ); - } - } - - thisCache = cache[ id ]; - - // jQuery data() is stored in a separate object inside the object's internal data - // cache in order to avoid key collisions between internal data and user-defined - // data. 
- if ( !pvt ) { - if ( !thisCache.data ) { - thisCache.data = {}; - } - - thisCache = thisCache.data; - } - - if ( data !== undefined ) { - thisCache[ jQuery.camelCase( name ) ] = data; - } - - // Check for both converted-to-camel and non-converted data property names - // If a data property was specified - if ( typeof name === "string" ) { - - // First Try to find as-is property data - ret = thisCache[ name ]; - - // Test for null|undefined property data - if ( ret == null ) { - - // Try to find the camelCased property - ret = thisCache[ jQuery.camelCase( name ) ]; - } - } else { - ret = thisCache; - } - - return ret; -} - -function internalRemoveData( elem, name, pvt ) { - if ( !jQuery.acceptData( elem ) ) { - return; - } - - var thisCache, i, - isNode = elem.nodeType, - - // See jQuery.data for more information - cache = isNode ? jQuery.cache : elem, - id = isNode ? elem[ jQuery.expando ] : jQuery.expando; - - // If there is already no cache entry for this object, there is no - // purpose in continuing - if ( !cache[ id ] ) { - return; - } - - if ( name ) { - - thisCache = pvt ? cache[ id ] : cache[ id ].data; - - if ( thisCache ) { - - // Support array or space separated string names for data keys - if ( !jQuery.isArray( name ) ) { - - // try the string as a key before any manipulation - if ( name in thisCache ) { - name = [ name ]; - } else { - - // split the camel cased version by spaces unless a key with the spaces exists - name = jQuery.camelCase( name ); - if ( name in thisCache ) { - name = [ name ]; - } else { - name = name.split(" "); - } - } - } else { - // If "name" is an array of keys... - // When data is initially created, via ("key", "val") signature, - // keys will be converted to camelCase. - // Since there is no way to tell _how_ a key was added, remove - // both plain key and camelCase key. #12786 - // This will only penalize the array argument path. 
- name = name.concat( jQuery.map( name, jQuery.camelCase ) ); - } - - i = name.length; - while ( i-- ) { - delete thisCache[ name[i] ]; - } - - // If there is no data left in the cache, we want to continue - // and let the cache object itself get destroyed - if ( pvt ? !isEmptyDataObject(thisCache) : !jQuery.isEmptyObject(thisCache) ) { - return; - } - } - } - - // See jQuery.data for more information - if ( !pvt ) { - delete cache[ id ].data; - - // Don't destroy the parent cache unless the internal data object - // had been the only thing left in it - if ( !isEmptyDataObject( cache[ id ] ) ) { - return; - } - } - - // Destroy the cache - if ( isNode ) { - jQuery.cleanData( [ elem ], true ); - - // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080) - /* jshint eqeqeq: false */ - } else if ( jQuery.support.deleteExpando || cache != cache.window ) { - /* jshint eqeqeq: true */ - delete cache[ id ]; - - // When all else fails, null - } else { - cache[ id ] = null; - } -} - -jQuery.extend({ - cache: {}, - - // The following elements throw uncatchable exceptions if you - // attempt to add expando properties to them. - noData: { - "applet": true, - "embed": true, - // Ban all objects except for Flash (which handle expandos) - "object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" - }, - - hasData: function( elem ) { - elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; - return !!elem && !isEmptyDataObject( elem ); - }, - - data: function( elem, name, data ) { - return internalData( elem, name, data ); - }, - - removeData: function( elem, name ) { - return internalRemoveData( elem, name ); - }, - - // For internal use only. 
- _data: function( elem, name, data ) { - return internalData( elem, name, data, true ); - }, - - _removeData: function( elem, name ) { - return internalRemoveData( elem, name, true ); - }, - - // A method for determining if a DOM node can handle the data expando - acceptData: function( elem ) { - // Do not set data on non-element because it will not be cleared (#8335). - if ( elem.nodeType && elem.nodeType !== 1 && elem.nodeType !== 9 ) { - return false; - } - - var noData = elem.nodeName && jQuery.noData[ elem.nodeName.toLowerCase() ]; - - // nodes accept data unless otherwise specified; rejection can be conditional - return !noData || noData !== true && elem.getAttribute("classid") === noData; - } -}); - -jQuery.fn.extend({ - data: function( key, value ) { - var attrs, name, - data = null, - i = 0, - elem = this[0]; - - // Special expections of .data basically thwart jQuery.access, - // so implement the relevant behavior ourselves - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = jQuery.data( elem ); - - if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) { - attrs = elem.attributes; - for ( ; i < attrs.length; i++ ) { - name = attrs[i].name; - - if ( name.indexOf("data-") === 0 ) { - name = jQuery.camelCase( name.slice(5) ); - - dataAttr( elem, name, data[ name ] ); - } - } - jQuery._data( elem, "parsedAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each(function() { - jQuery.data( this, key ); - }); - } - - return arguments.length > 1 ? - - // Sets one value - this.each(function() { - jQuery.data( this, key, value ); - }) : - - // Gets one value - // Try to fetch any internally stored data first - elem ? 
dataAttr( elem, key, jQuery.data( elem, key ) ) : null; - }, - - removeData: function( key ) { - return this.each(function() { - jQuery.removeData( this, key ); - }); - } -}); - -function dataAttr( elem, key, data ) { - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - - var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); - - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = data === "true" ? true : - data === "false" ? false : - data === "null" ? null : - // Only convert to a number if it doesn't change the string - +data + "" === data ? +data : - rbrace.test( data ) ? jQuery.parseJSON( data ) : - data; - } catch( e ) {} - - // Make sure we set the data so it isn't changed later - jQuery.data( elem, key, data ); - - } else { - data = undefined; - } - } - - return data; -} - -// checks a cache object for emptiness -function isEmptyDataObject( obj ) { - var name; - for ( name in obj ) { - - // if the public data object is empty, the private is still empty - if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { - continue; - } - if ( name !== "toJSON" ) { - return false; - } - } - - return true; -} -jQuery.extend({ - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = jQuery._data( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || jQuery.isArray(data) ) { - queue = jQuery._data( elem, type, jQuery.makeArray(data) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is 
dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // not intended for public consumption - generates a queueHooks object, or returns the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return jQuery._data( elem, key ) || jQuery._data( elem, key, { - empty: jQuery.Callbacks("once memory").add(function() { - jQuery._removeData( elem, type + "queue" ); - jQuery._removeData( elem, key ); - }) - }); - } -}); - -jQuery.fn.extend({ - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[0], type ); - } - - return data === undefined ? - this : - this.each(function() { - var queue = jQuery.queue( this, type, data ); - - // ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[0] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - }); - }, - dequeue: function( type ) { - return this.each(function() { - jQuery.dequeue( this, type ); - }); - }, - // Based off of the plugin by Clint Helfers, with permission. - // http://blindsignals.com/index.php/2009/07/jquery-delay/ - delay: function( time, type ) { - time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = setTimeout( next, time ); - hooks.stop = function() { - clearTimeout( timeout ); - }; - }); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while( i-- ) { - tmp = jQuery._data( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -}); -var nodeHook, boolHook, - rclass = /[\t\r\n\f]/g, - rreturn = /\r/g, - rfocusable = /^(?:input|select|textarea|button|object)$/i, - rclickable = /^(?:a|area)$/i, - ruseDefault = /^(?:checked|selected)$/i, - getSetAttribute = jQuery.support.getSetAttribute, - getSetInput = jQuery.support.input; - -jQuery.fn.extend({ - attr: function( name, value ) { - return jQuery.access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each(function() { - jQuery.removeAttr( this, name ); - }); - }, - - prop: function( name, value ) { - return jQuery.access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - name = jQuery.propFix[ name ] || name; - return this.each(function() { - // try/catch handles cases where IE balks (such as removing a property on window) - try { - this[ name ] = undefined; - delete this[ name ]; - } catch( e ) {} - }); - }, - - addClass: function( value ) { - var classes, elem, cur, clazz, j, - i = 0, - len = 
this.length, - proceed = typeof value === "string" && value; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( j ) { - jQuery( this ).addClass( value.call( this, j, this.className ) ); - }); - } - - if ( proceed ) { - // The disjunction here is for better compressibility (see removeClass) - classes = ( value || "" ).match( core_rnotwhite ) || []; - - for ( ; i < len; i++ ) { - elem = this[ i ]; - cur = elem.nodeType === 1 && ( elem.className ? - ( " " + elem.className + " " ).replace( rclass, " " ) : - " " - ); - - if ( cur ) { - j = 0; - while ( (clazz = classes[j++]) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - elem.className = jQuery.trim( cur ); - - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, clazz, j, - i = 0, - len = this.length, - proceed = arguments.length === 0 || typeof value === "string" && value; - - if ( jQuery.isFunction( value ) ) { - return this.each(function( j ) { - jQuery( this ).removeClass( value.call( this, j, this.className ) ); - }); - } - if ( proceed ) { - classes = ( value || "" ).match( core_rnotwhite ) || []; - - for ( ; i < len; i++ ) { - elem = this[ i ]; - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( elem.className ? - ( " " + elem.className + " " ).replace( rclass, " " ) : - "" - ); - - if ( cur ) { - j = 0; - while ( (clazz = classes[j++]) ) { - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) >= 0 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - elem.className = value ? jQuery.trim( cur ) : ""; - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value; - - if ( typeof stateVal === "boolean" && type === "string" ) { - return stateVal ? 
this.addClass( value ) : this.removeClass( value ); - } - - if ( jQuery.isFunction( value ) ) { - return this.each(function( i ) { - jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal ); - }); - } - - return this.each(function() { - if ( type === "string" ) { - // toggle individual class names - var className, - i = 0, - self = jQuery( this ), - classNames = value.match( core_rnotwhite ) || []; - - while ( (className = classNames[ i++ ]) ) { - // check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( type === core_strundefined || type === "boolean" ) { - if ( this.className ) { - // store className if set - jQuery._data( this, "__className__", this.className ); - } - - // If the element has a class name or if we're passed "false", - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - this.className = this.className || value === false ? "" : jQuery._data( this, "__className__" ) || ""; - } - }); - }, - - hasClass: function( selector ) { - var className = " " + selector + " ", - i = 0, - l = this.length; - for ( ; i < l; i++ ) { - if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) >= 0 ) { - return true; - } - } - - return false; - }, - - val: function( value ) { - var ret, hooks, isFunction, - elem = this[0]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) { - return ret; - } - - ret = elem.value; - - return typeof ret === "string" ? 
- // handle most common string cases - ret.replace(rreturn, "") : - // handle cases where value is null/undef or number - ret == null ? "" : ret; - } - - return; - } - - isFunction = jQuery.isFunction( value ); - - return this.each(function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( isFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - } else if ( typeof val === "number" ) { - val += ""; - } else if ( jQuery.isArray( val ) ) { - val = jQuery.map(val, function ( value ) { - return value == null ? "" : value + ""; - }); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - }); - } -}); - -jQuery.extend({ - valHooks: { - option: { - get: function( elem ) { - // Use proper attribute retrieval(#6932, #12072) - var val = jQuery.find.attr( elem, "value" ); - return val != null ? - val : - elem.text; - } - }, - select: { - get: function( elem ) { - var value, option, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one" || index < 0, - values = one ? null : [], - max = one ? index + 1 : options.length, - i = index < 0 ? - max : - one ? index : 0; - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // oldIE doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - // Don't return options that are disabled or in a disabled optgroup - ( jQuery.support.optDisabled ? 
!option.disabled : option.getAttribute("disabled") === null ) && - ( !option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - if ( (option.selected = jQuery.inArray( jQuery(option).val(), values ) >= 0) ) { - optionSet = true; - } - } - - // force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - }, - - attr: function( elem, name, value ) { - var hooks, ret, - nType = elem.nodeType; - - // don't get/set attributes on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === core_strundefined ) { - return jQuery.prop( elem, name, value ); - } - - // All attributes are lowercase - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - name = name.toLowerCase(); - hooks = jQuery.attrHooks[ name ] || - ( jQuery.expr.match.bool.test( name ) ? 
boolHook : nodeHook ); - } - - if ( value !== undefined ) { - - if ( value === null ) { - jQuery.removeAttr( elem, name ); - - } else if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) { - return ret; - - } else { - elem.setAttribute( name, value + "" ); - return value; - } - - } else if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) { - return ret; - - } else { - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? - undefined : - ret; - } - }, - - removeAttr: function( elem, value ) { - var name, propName, - i = 0, - attrNames = value && value.match( core_rnotwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( (name = attrNames[i++]) ) { - propName = jQuery.propFix[ name ] || name; - - // Boolean attributes get special treatment (#10870) - if ( jQuery.expr.match.bool.test( name ) ) { - // Set corresponding property to false - if ( getSetInput && getSetAttribute || !ruseDefault.test( name ) ) { - elem[ propName ] = false; - // Support: IE<9 - // Also clear defaultChecked/defaultSelected (if appropriate) - } else { - elem[ jQuery.camelCase( "default-" + name ) ] = - elem[ propName ] = false; - } - - // See #9699 for explanation of this approach (setting first, then removal) - } else { - jQuery.attr( elem, name, "" ); - } - - elem.removeAttribute( getSetAttribute ? 
name : propName ); - } - } - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) { - // Setting the type on a radio button after the value resets the value in IE6-9 - // Reset value to default in case type is set after value during creation - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - }, - - prop: function( elem, name, value ) { - var ret, hooks, notxml, - nType = elem.nodeType; - - // don't get/set properties on text, comment and attribute nodes - if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); - - if ( notxml ) { - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - return hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ? - ret : - ( elem[ name ] = value ); - - } else { - return hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ? - ret : - elem[ name ]; - } - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set - // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - return tabindex ? - parseInt( tabindex, 10 ) : - rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ? 
- 0 : - -1; - } - } - } -}); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else if ( getSetInput && getSetAttribute || !ruseDefault.test( name ) ) { - // IE<8 needs the *property* name - elem.setAttribute( !getSetAttribute && jQuery.propFix[ name ] || name, name ); - - // Use defaultChecked and defaultSelected for oldIE - } else { - elem[ jQuery.camelCase( "default-" + name ) ] = elem[ name ] = true; - } - - return name; - } -}; -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { - var getter = jQuery.expr.attrHandle[ name ] || jQuery.find.attr; - - jQuery.expr.attrHandle[ name ] = getSetInput && getSetAttribute || !ruseDefault.test( name ) ? - function( elem, name, isXML ) { - var fn = jQuery.expr.attrHandle[ name ], - ret = isXML ? - undefined : - /* jshint eqeqeq: false */ - (jQuery.expr.attrHandle[ name ] = undefined) != - getter( elem, name, isXML ) ? - - name.toLowerCase() : - null; - jQuery.expr.attrHandle[ name ] = fn; - return ret; - } : - function( elem, name, isXML ) { - return isXML ? - undefined : - elem[ jQuery.camelCase( "default-" + name ) ] ? 
- name.toLowerCase() : - null; - }; -}); - -// fix oldIE attroperties -if ( !getSetInput || !getSetAttribute ) { - jQuery.attrHooks.value = { - set: function( elem, value, name ) { - if ( jQuery.nodeName( elem, "input" ) ) { - // Does not return so that setAttribute is also used - elem.defaultValue = value; - } else { - // Use nodeHook if defined (#1954); otherwise setAttribute is fine - return nodeHook && nodeHook.set( elem, value, name ); - } - } - }; -} - -// IE6/7 do not support getting/setting some attributes with get/setAttribute -if ( !getSetAttribute ) { - - // Use this for any attribute in IE6/7 - // This fixes almost every IE6/7 issue - nodeHook = { - set: function( elem, value, name ) { - // Set the existing or create a new attribute node - var ret = elem.getAttributeNode( name ); - if ( !ret ) { - elem.setAttributeNode( - (ret = elem.ownerDocument.createAttribute( name )) - ); - } - - ret.value = value += ""; - - // Break association with cloned elements by also using setAttribute (#9646) - return name === "value" || value === elem.getAttribute( name ) ? - value : - undefined; - } - }; - jQuery.expr.attrHandle.id = jQuery.expr.attrHandle.name = jQuery.expr.attrHandle.coords = - // Some attributes are constructed with empty-string values when not defined - function( elem, name, isXML ) { - var ret; - return isXML ? - undefined : - (ret = elem.getAttributeNode( name )) && ret.value !== "" ? - ret.value : - null; - }; - jQuery.valHooks.button = { - get: function( elem, name ) { - var ret = elem.getAttributeNode( name ); - return ret && ret.specified ? - ret.value : - undefined; - }, - set: nodeHook.set - }; - - // Set contenteditable to false on removals(#10429) - // Setting to empty string throws an error as an invalid value - jQuery.attrHooks.contenteditable = { - set: function( elem, value, name ) { - nodeHook.set( elem, value === "" ? 
false : value, name ); - } - }; - - // Set width and height to auto instead of 0 on empty string( Bug #8150 ) - // This is for removals - jQuery.each([ "width", "height" ], function( i, name ) { - jQuery.attrHooks[ name ] = { - set: function( elem, value ) { - if ( value === "" ) { - elem.setAttribute( name, "auto" ); - return value; - } - } - }; - }); -} - - -// Some attributes require a special call on IE -// http://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx -if ( !jQuery.support.hrefNormalized ) { - // href/src property should get the full normalized URL (#10299/#12915) - jQuery.each([ "href", "src" ], function( i, name ) { - jQuery.propHooks[ name ] = { - get: function( elem ) { - return elem.getAttribute( name, 4 ); - } - }; - }); -} - -if ( !jQuery.support.style ) { - jQuery.attrHooks.style = { - get: function( elem ) { - // Return undefined in the case of empty string - // Note: IE uppercases css property names, but if we were to .toLowerCase() - // .cssText, that would destroy case senstitivity in URL's, like in "background" - return elem.style.cssText || undefined; - }, - set: function( elem, value ) { - return ( elem.style.cssText = value + "" ); - } - }; -} - -// Safari mis-reports the default selected property of an option -// Accessing the parent's selectedIndex property fixes it -if ( !jQuery.support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - var parent = elem.parentNode; - - if ( parent ) { - parent.selectedIndex; - - // Make sure that it also works with optgroups, see #5701 - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - return null; - } - }; -} - -jQuery.each([ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -}); - -// IE6/7 call enctype encoding -if ( !jQuery.support.enctype ) { - jQuery.propFix.enctype = 
"encoding"; -} - -// Radios and checkboxes getter/setter -jQuery.each([ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( jQuery.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 ); - } - } - }; - if ( !jQuery.support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - // Support: Webkit - // "" is returned instead of "on" if a value isn't specified - return elem.getAttribute("value") === null ? "on" : elem.value; - }; - } -}); -var rformElems = /^(?:input|select|textarea)$/i, - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|contextmenu)|click/, - rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)$/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. 
- */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - var tmp, events, t, handleObjIn, - special, eventHandle, handleObj, - handlers, type, namespaces, origType, - elemData = jQuery._data( elem ); - - // Don't attach events to noData or text/comment nodes (but allow plain objects) - if ( !elemData ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !(events = elemData.events) ) { - events = elemData.events = {}; - } - if ( !(eventHandle = elemData.handle) ) { - eventHandle = elemData.handle = function( e ) { - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== core_strundefined && (!e || jQuery.event.triggered !== e.type) ? - jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : - undefined; - }; - // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events - eventHandle.elem = elem; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( core_rnotwhite ) || [""]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[t] ) || []; - type = origType = tmp[1]; - namespaces = ( tmp[2] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? 
special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend({ - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join(".") - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !(handlers = events[ type ]) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener/attachEvent if the special events handler returns false - if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - // Bind the global event handler to the element - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle, false ); - - } else if ( elem.attachEvent ) { - elem.attachEvent( "on" + type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - // Nullify elem to prevent memory leaks in IE - elem = null; - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - var j, handleObj, tmp, - origCount, t, events, - special, handlers, type, - namespaces, origType, - elemData = jQuery.hasData( elem ) && jQuery._data( elem ); - - if ( !elemData || !(events = elemData.events) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - 
types = ( types || "" ).match( core_rnotwhite ) || [""]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[t] ) || []; - type = origType = tmp[1]; - namespaces = ( tmp[2] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[2] && new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - delete elemData.handle; - - // removeData also checks for emptiness and clears the expando if empty - // so use it instead of delete - jQuery._removeData( elem, "events" ); - } - }, - - trigger: function( event, data, elem, onlyHandlers ) { - var handle, ontype, cur, 
- bubbleType, special, tmp, i, - eventPath = [ elem || document ], - type = core_hasOwn.call( event, "type" ) ? event.type : event, - namespaces = core_hasOwn.call( event, "namespace" ) ? event.namespace.split(".") : []; - - cur = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf(".") >= 0 ) { - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split("."); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf(":") < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? - event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join("."); - event.namespace_re = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? 
- [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === (elem.ownerDocument || document) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( (cur = eventPath[i++]) && !event.isPropagationStopped() ) { - - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && jQuery.acceptData( cur ) && handle.apply && handle.apply( cur, data ) === false ) { - event.preventDefault(); - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( (!special._default || special._default.apply( eventPath.pop(), data ) === false) && - jQuery.acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name name as the event. - // Can't use an .isFunction() check here because IE6/7 fails that test. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && elem[ type ] && !jQuery.isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - try { - elem[ type ](); - } catch ( e ) { - // IE<9 dies on focus/blur to hidden element (#1486,#12518) - // only reproducible on winXP IE8 native, not IE9 in IE8 mode - } - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - dispatch: function( event ) { - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( event ); - - var i, ret, handleObj, matched, j, - handlerQueue = [], - args = core_slice.call( arguments ), - handlers = ( jQuery._data( this, "events" ) || {} )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[0] = event; - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( (matched = handlerQueue[ i++ ]) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( (handleObj = matched.handlers[ j++ ]) && !event.isImmediatePropagationStopped() ) { - - // Triggered event must either 1) have no namespace, or - // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). 
- if ( !event.namespace_re || event.namespace_re.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) - .apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( (event.result = ret) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var sel, handleObj, matches, i, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - // Black-hole SVG instance trees (#13180) - // Avoid non-left-click bubbling in Firefox (#3861) - if ( delegateCount && cur.nodeType && (!event.button || event.type !== "click") ) { - - /* jshint eqeqeq: false */ - for ( ; cur != this; cur = cur.parentNode || this ) { - /* jshint eqeqeq: true */ - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && (cur.disabled !== true || event.type !== "click") ) { - matches = []; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matches[ sel ] === undefined ) { - matches[ sel ] = handleObj.needsContext ? 
- jQuery( sel, this ).index( cur ) >= 0 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matches[ sel ] ) { - matches.push( handleObj ); - } - } - if ( matches.length ) { - handlerQueue.push({ elem: cur, handlers: matches }); - } - } - } - } - - // Add the remaining (directly-bound) handlers - if ( delegateCount < handlers.length ) { - handlerQueue.push({ elem: this, handlers: handlers.slice( delegateCount ) }); - } - - return handlerQueue; - }, - - fix: function( event ) { - if ( event[ jQuery.expando ] ) { - return event; - } - - // Create a writable copy of the event object and normalize some properties - var i, prop, copy, - type = event.type, - originalEvent = event, - fixHook = this.fixHooks[ type ]; - - if ( !fixHook ) { - this.fixHooks[ type ] = fixHook = - rmouseEvent.test( type ) ? this.mouseHooks : - rkeyEvent.test( type ) ? this.keyHooks : - {}; - } - copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props; - - event = new jQuery.Event( originalEvent ); - - i = copy.length; - while ( i-- ) { - prop = copy[ i ]; - event[ prop ] = originalEvent[ prop ]; - } - - // Support: IE<9 - // Fix target property (#1925) - if ( !event.target ) { - event.target = originalEvent.srcElement || document; - } - - // Support: Chrome 23+, Safari? - // Target should not be a text node (#504, #13143) - if ( event.target.nodeType === 3 ) { - event.target = event.target.parentNode; - } - - // Support: IE<9 - // For mouse/key events, metaKey==false if it's undefined (#3368, #11328) - event.metaKey = !!event.metaKey; - - return fixHook.filter ? 
fixHook.filter( event, originalEvent ) : event; - }, - - // Includes some event props shared by KeyEvent and MouseEvent - props: "altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), - - fixHooks: {}, - - keyHooks: { - props: "char charCode key keyCode".split(" "), - filter: function( event, original ) { - - // Add which for key events - if ( event.which == null ) { - event.which = original.charCode != null ? original.charCode : original.keyCode; - } - - return event; - } - }, - - mouseHooks: { - props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), - filter: function( event, original ) { - var body, eventDoc, doc, - button = original.button, - fromElement = original.fromElement; - - // Calculate pageX/Y if missing and clientX/Y available - if ( event.pageX == null && original.clientX != null ) { - eventDoc = event.target.ownerDocument || document; - doc = eventDoc.documentElement; - body = eventDoc.body; - - event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); - event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); - } - - // Add relatedTarget, if necessary - if ( !event.relatedTarget && fromElement ) { - event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - // Note: button is not normalized, so don't use it - if ( !event.which && button !== undefined ) { - event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 
2 : 0 ) ) ); - } - - return event; - } - }, - - special: { - load: { - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - focus: { - // Fire native event if possible so blur/focus sequence is correct - trigger: function() { - if ( this !== safeActiveElement() && this.focus ) { - try { - this.focus(); - return false; - } catch ( e ) { - // Support: IE<9 - // If we error on focus to hidden element (#1486, #12518), - // let .trigger() run the handlers - } - } - }, - delegateType: "focusin" - }, - blur: { - trigger: function() { - if ( this === safeActiveElement() && this.blur ) { - this.blur(); - return false; - } - }, - delegateType: "focusout" - }, - click: { - // For checkbox, fire native event so checked state will be right - trigger: function() { - if ( jQuery.nodeName( this, "input" ) && this.type === "checkbox" && this.click ) { - this.click(); - return false; - } - }, - - // For cross-browser consistency, don't fire native .click() on links - _default: function( event ) { - return jQuery.nodeName( event.target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Even when returnValue equals to undefined Firefox will still show alert - if ( event.result !== undefined ) { - event.originalEvent.returnValue = event.result; - } - } - } - }, - - simulate: function( type, elem, event, bubble ) { - // Piggyback on a donor event to simulate a different one. - // Fake originalEvent to avoid donor's stopPropagation, but if the - // simulated event prevents default then we do the same on the donor. - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true, - originalEvent: {} - } - ); - if ( bubble ) { - jQuery.event.trigger( e, null, elem ); - } else { - jQuery.event.dispatch.call( elem, e ); - } - if ( e.isDefaultPrevented() ) { - event.preventDefault(); - } - } -}; - -jQuery.removeEvent = document.removeEventListener ? 
- function( elem, type, handle ) { - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle, false ); - } - } : - function( elem, type, handle ) { - var name = "on" + type; - - if ( elem.detachEvent ) { - - // #8545, #7054, preventing memory leaks for custom events in IE6-8 - // detachEvent needed property on element, by name of that event, to properly expose it to GC - if ( typeof elem[ name ] === core_strundefined ) { - elem[ name ] = null; - } - - elem.detachEvent( name, handle ); - } - }; - -jQuery.Event = function( src, props ) { - // Allow instantiation without the 'new' keyword - if ( !(this instanceof jQuery.Event) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false || - src.getPreventDefault && src.getPreventDefault() ) ? 
returnTrue : returnFalse; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || jQuery.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - if ( !e ) { - return; - } - - // If preventDefault exists, run it on the original event - if ( e.preventDefault ) { - e.preventDefault(); - - // Support: IE - // Otherwise set the returnValue property of the original event to false - } else { - e.returnValue = false; - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - if ( !e ) { - return; - } - // If stopPropagation exists, run it on the original event - if ( e.stopPropagation ) { - e.stopPropagation(); - } - - // Support: IE - // Set the cancelBubble property of the original event to true - e.cancelBubble = true; - }, - stopImmediatePropagation: function() { - this.isImmediatePropagationStopped = returnTrue; - this.stopPropagation(); - } -}; - -// Create mouseenter/leave events using mouseover/out and event-time checks -jQuery.each({ - mouseenter: "mouseover", - mouseleave: "mouseout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mousenter/leave call the handler if related 
is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || (related !== target && !jQuery.contains( target, related )) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -}); - -// IE submit delegation -if ( !jQuery.support.submitBubbles ) { - - jQuery.event.special.submit = { - setup: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Lazy-add a submit handler when a descendant form may potentially be submitted - jQuery.event.add( this, "click._submit keypress._submit", function( e ) { - // Node name check avoids a VML-related crash in IE (#9807) - var elem = e.target, - form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? elem.form : undefined; - if ( form && !jQuery._data( form, "submitBubbles" ) ) { - jQuery.event.add( form, "submit._submit", function( event ) { - event._submit_bubble = true; - }); - jQuery._data( form, "submitBubbles", true ); - } - }); - // return undefined since we don't need an event listener - }, - - postDispatch: function( event ) { - // If form was submitted by the user, bubble the event up the tree - if ( event._submit_bubble ) { - delete event._submit_bubble; - if ( this.parentNode && !event.isTrigger ) { - jQuery.event.simulate( "submit", this.parentNode, event, true ); - } - } - }, - - teardown: function() { - // Only need this for delegated form submit events - if ( jQuery.nodeName( this, "form" ) ) { - return false; - } - - // Remove delegated handlers; cleanData eventually reaps submit handlers attached above - jQuery.event.remove( this, "._submit" ); - } - }; -} - -// IE change delegation and checkbox/radio fix -if ( !jQuery.support.changeBubbles ) { - - jQuery.event.special.change = { - - setup: function() { - - if ( rformElems.test( this.nodeName ) ) { - // IE doesn't fire change 
on a check/radio until blur; trigger it on click - // after a propertychange. Eat the blur-change in special.change.handle. - // This still fires onchange a second time for check/radio after blur. - if ( this.type === "checkbox" || this.type === "radio" ) { - jQuery.event.add( this, "propertychange._change", function( event ) { - if ( event.originalEvent.propertyName === "checked" ) { - this._just_changed = true; - } - }); - jQuery.event.add( this, "click._change", function( event ) { - if ( this._just_changed && !event.isTrigger ) { - this._just_changed = false; - } - // Allow triggered, simulated change events (#11500) - jQuery.event.simulate( "change", this, event, true ); - }); - } - return false; - } - // Delegated event; lazy-add a change handler on descendant inputs - jQuery.event.add( this, "beforeactivate._change", function( e ) { - var elem = e.target; - - if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "changeBubbles" ) ) { - jQuery.event.add( elem, "change._change", function( event ) { - if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { - jQuery.event.simulate( "change", this.parentNode, event, true ); - } - }); - jQuery._data( elem, "changeBubbles", true ); - } - }); - }, - - handle: function( event ) { - var elem = event.target; - - // Swallow native change events from checkbox/radio, we already triggered them above - if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { - return event.handleObj.handler.apply( this, arguments ); - } - }, - - teardown: function() { - jQuery.event.remove( this, "._change" ); - - return !rformElems.test( this.nodeName ); - } - }; -} - -// Create "bubbling" focus and blur events -if ( !jQuery.support.focusinBubbles ) { - jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler while someone wants focusin/focusout - var attaches = 0, - handler = function( event ) { - 
jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - if ( attaches++ === 0 ) { - document.addEventListener( orig, handler, true ); - } - }, - teardown: function() { - if ( --attaches === 0 ) { - document.removeEventListener( orig, handler, true ); - } - } - }; - }); -} - -jQuery.fn.extend({ - - on: function( types, selector, data, fn, /*INTERNAL*/ one ) { - var type, origFn; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - this.on( type, selector, data, types[ type ], one ); - } - return this; - } - - if ( data == null && fn == null ) { - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return this; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return this.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - }); - }, - one: function( types, selector, data, fn ) { - return this.on( types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - 
handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each(function() { - jQuery.event.remove( this, types, fn, selector ); - }); - }, - - trigger: function( type, data ) { - return this.each(function() { - jQuery.event.trigger( type, data, this ); - }); - }, - triggerHandler: function( type, data ) { - var elem = this[0]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -}); -var isSimple = /^.[^:#\[\.,]*$/, - rparentsprev = /^(?:parents|prev(?:Until|All))/, - rneedsContext = jQuery.expr.match.needsContext, - // methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend({ - find: function( selector ) { - var i, - ret = [], - self = this, - len = self.length; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter(function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - }) ); - } - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - // Needed because $( selector, context ) becomes $( context ).find( selector ) - ret = this.pushStack( len > 1 ? jQuery.unique( ret ) : ret ); - ret.selector = this.selector ? 
this.selector + " " + selector : selector; - return ret; - }, - - has: function( target ) { - var i, - targets = jQuery( target, this ), - len = targets.length; - - return this.filter(function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( this, targets[i] ) ) { - return true; - } - } - }); - }, - - not: function( selector ) { - return this.pushStack( winnow(this, selector || [], true) ); - }, - - filter: function( selector ) { - return this.pushStack( winnow(this, selector || [], false) ); - }, - - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? - jQuery( selector ) : - selector || [], - false - ).length; - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - ret = [], - pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ? - jQuery( selectors, context || this.context ) : - 0; - - for ( ; i < l; i++ ) { - for ( cur = this[i]; cur && cur !== context; cur = cur.parentNode ) { - // Always skip document fragments - if ( cur.nodeType < 11 && (pos ? - pos.index(cur) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector(cur, selectors)) ) { - - cur = ret.push( cur ); - break; - } - } - } - - return this.pushStack( ret.length > 1 ? jQuery.unique( ret ) : ret ); - }, - - // Determine the position of an element within - // the matched set of elements - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[0] && this[0].parentNode ) ? 
this.first().prevAll().length : -1; - } - - // index in selector - if ( typeof elem === "string" ) { - return jQuery.inArray( this[0], jQuery( elem ) ); - } - - // Locate the position of the desired element - return jQuery.inArray( - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[0] : elem, this ); - }, - - add: function( selector, context ) { - var set = typeof selector === "string" ? - jQuery( selector, context ) : - jQuery.makeArray( selector && selector.nodeType ? [ selector ] : selector ), - all = jQuery.merge( this.get(), set ); - - return this.pushStack( jQuery.unique(all) ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter(selector) - ); - } -}); - -function sibling( cur, dir ) { - do { - cur = cur[ dir ]; - } while ( cur && cur.nodeType !== 1 ); - - return cur; -} - -jQuery.each({ - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? parent : null; - }, - parents: function( elem ) { - return jQuery.dir( elem, "parentNode" ); - }, - parentsUntil: function( elem, i, until ) { - return jQuery.dir( elem, "parentNode", until ); - }, - next: function( elem ) { - return sibling( elem, "nextSibling" ); - }, - prev: function( elem ) { - return sibling( elem, "previousSibling" ); - }, - nextAll: function( elem ) { - return jQuery.dir( elem, "nextSibling" ); - }, - prevAll: function( elem ) { - return jQuery.dir( elem, "previousSibling" ); - }, - nextUntil: function( elem, i, until ) { - return jQuery.dir( elem, "nextSibling", until ); - }, - prevUntil: function( elem, i, until ) { - return jQuery.dir( elem, "previousSibling", until ); - }, - siblings: function( elem ) { - return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); - }, - children: function( elem ) { - return jQuery.sibling( elem.firstChild ); - }, - contents: function( elem ) { - return jQuery.nodeName( elem, "iframe" ) ? 
- elem.contentDocument || elem.contentWindow.document : - jQuery.merge( [], elem.childNodes ); - } -}, function( name, fn ) { - jQuery.fn[ name ] = function( until, selector ) { - var ret = jQuery.map( this, fn, until ); - - if ( name.slice( -5 ) !== "Until" ) { - selector = until; - } - - if ( selector && typeof selector === "string" ) { - ret = jQuery.filter( selector, ret ); - } - - if ( this.length > 1 ) { - // Remove duplicates - if ( !guaranteedUnique[ name ] ) { - ret = jQuery.unique( ret ); - } - - // Reverse order for parents* and prev-derivatives - if ( rparentsprev.test( name ) ) { - ret = ret.reverse(); - } - } - - return this.pushStack( ret ); - }; -}); - -jQuery.extend({ - filter: function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - return elems.length === 1 && elem.nodeType === 1 ? - jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [] : - jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - })); - }, - - dir: function( elem, dir, until ) { - var matched = [], - cur = elem[ dir ]; - - while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { - if ( cur.nodeType === 1 ) { - matched.push( cur ); - } - cur = cur[dir]; - } - return matched; - }, - - sibling: function( n, elem ) { - var r = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - r.push( n ); - } - } - - return r; - } -}); - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( jQuery.isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - /* jshint -W018 */ - return !!qualifier.call( elem, i, elem ) !== not; - }); - - } - - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - }); - - } - - if ( typeof qualifier === "string" ) { - if ( 
isSimple.test( qualifier ) ) { - return jQuery.filter( qualifier, elements, not ); - } - - qualifier = jQuery.filter( qualifier, elements ); - } - - return jQuery.grep( elements, function( elem ) { - return ( jQuery.inArray( elem, qualifier ) >= 0 ) !== not; - }); -} -function createSafeFragment( document ) { - var list = nodeNames.split( "|" ), - safeFrag = document.createDocumentFragment(); - - if ( safeFrag.createElement ) { - while ( list.length ) { - safeFrag.createElement( - list.pop() - ); - } - } - return safeFrag; -} - -var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" + - "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video", - rinlinejQuery = / jQuery\d+="(?:null|\d+)"/g, - rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\\s/>]", "i"), - rleadingWhitespace = /^\s+/, - rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi, - rtagName = /<([\w:]+)/, - rtbody = /\s*$/g, - - // We have to close these tags to support XHTML (#13200) - wrapMap = { - option: [ 1, "" ], - legend: [ 1, "
", "
" ], - area: [ 1, "", "" ], - param: [ 1, "", "" ], - thead: [ 1, "", "
" ], - tr: [ 2, "", "
" ], - col: [ 2, "", "
" ], - td: [ 3, "", "
" ], - - // IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags, - // unless wrapped in a div with non-breaking characters in front of it. - _default: jQuery.support.htmlSerialize ? [ 0, "", "" ] : [ 1, "X
", "
" ] - }, - safeFragment = createSafeFragment( document ), - fragmentDiv = safeFragment.appendChild( document.createElement("div") ); - -wrapMap.optgroup = wrapMap.option; -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -jQuery.fn.extend({ - text: function( value ) { - return jQuery.access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) ); - }, null, value, arguments.length ); - }, - - append: function() { - return this.domManip( arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - }); - }, - - prepend: function() { - return this.domManip( arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - }); - }, - - before: function() { - return this.domManip( arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - }); - }, - - after: function() { - return this.domManip( arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - }); - }, - - // keepData is for internal use only--do not document - remove: function( selector, keepData ) { - var elem, - elems = selector ? 
jQuery.filter( selector, this ) : this, - i = 0; - - for ( ; (elem = elems[i]) != null; i++ ) { - - if ( !keepData && elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem ) ); - } - - if ( elem.parentNode ) { - if ( keepData && jQuery.contains( elem.ownerDocument, elem ) ) { - setGlobalEval( getAll( elem, "script" ) ); - } - elem.parentNode.removeChild( elem ); - } - } - - return this; - }, - - empty: function() { - var elem, - i = 0; - - for ( ; (elem = this[i]) != null; i++ ) { - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - } - - // Remove any remaining nodes - while ( elem.firstChild ) { - elem.removeChild( elem.firstChild ); - } - - // If this is a select, ensure that it displays empty (#12336) - // Support: IE<9 - if ( elem.options && jQuery.nodeName( elem, "select" ) ) { - elem.options.length = 0; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; - - return this.map( function () { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - }); - }, - - html: function( value ) { - return jQuery.access( this, function( value ) { - var elem = this[0] || {}, - i = 0, - l = this.length; - - if ( value === undefined ) { - return elem.nodeType === 1 ? 
- elem.innerHTML.replace( rinlinejQuery, "" ) : - undefined; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - ( jQuery.support.htmlSerialize || !rnoshimcache.test( value ) ) && - ( jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value ) ) && - !wrapMap[ ( rtagName.exec( value ) || ["", ""] )[1].toLowerCase() ] ) { - - value = value.replace( rxhtmlTag, "<$1>" ); - - try { - for (; i < l; i++ ) { - // Remove element nodes and prevent memory leaks - elem = this[i] || {}; - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch(e) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var - // Snapshot the DOM in case .domManip sweeps something relevant into its fragment - args = jQuery.map( this, function( elem ) { - return [ elem.nextSibling, elem.parentNode ]; - }), - i = 0; - - // Make the changes, replacing each context element with the new content - this.domManip( arguments, function( elem ) { - var next = args[ i++ ], - parent = args[ i++ ]; - - if ( parent ) { - // Don't use the snapshot next if it has moved (#13810) - if ( next && next.parentNode !== parent ) { - next = this.nextSibling; - } - jQuery( this ).remove(); - parent.insertBefore( elem, next ); - } - // Allow new content to include elements from the context set - }, true ); - - // Force removal if there was no new content (e.g., from empty arguments) - return i ? 
this : this.remove(); - }, - - detach: function( selector ) { - return this.remove( selector, true ); - }, - - domManip: function( args, callback, allowIntersection ) { - - // Flatten any nested arrays - args = core_concat.apply( [], args ); - - var first, node, hasScripts, - scripts, doc, fragment, - i = 0, - l = this.length, - set = this, - iNoClone = l - 1, - value = args[0], - isFunction = jQuery.isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( isFunction || !( l <= 1 || typeof value !== "string" || jQuery.support.checkClone || !rchecked.test( value ) ) ) { - return this.each(function( index ) { - var self = set.eq( index ); - if ( isFunction ) { - args[0] = value.call( this, index, self.html() ); - } - self.domManip( args, callback, allowIntersection ); - }); - } - - if ( l ) { - fragment = jQuery.buildFragment( args, this[ 0 ].ownerDocument, false, !allowIntersection && this ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - if ( first ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). 
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( this[i], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !jQuery._data( node, "globalEval" ) && jQuery.contains( doc, node ) ) { - - if ( node.src ) { - // Hope ajax is available... - jQuery._evalUrl( node.src ); - } else { - jQuery.globalEval( ( node.text || node.textContent || node.innerHTML || "" ).replace( rcleanScript, "" ) ); - } - } - } - } - - // Fix #11809: Avoid leaking memory - fragment = first = null; - } - } - - return this; - } -}); - -// Support: IE<8 -// Manipulating tables requires a tbody -function manipulationTarget( elem, content ) { - return jQuery.nodeName( elem, "table" ) && - jQuery.nodeName( content.nodeType === 1 ? content : content.firstChild, "tr" ) ? 
- - elem.getElementsByTagName("tbody")[0] || - elem.appendChild( elem.ownerDocument.createElement("tbody") ) : - elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = (jQuery.find.attr( elem, "type" ) !== null) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - var match = rscriptTypeMasked.exec( elem.type ); - if ( match ) { - elem.type = match[1]; - } else { - elem.removeAttribute("type"); - } - return elem; -} - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var elem, - i = 0; - for ( ; (elem = elems[i]) != null; i++ ) { - jQuery._data( elem, "globalEval", !refElements || jQuery._data( refElements[i], "globalEval" ) ); - } -} - -function cloneCopyEvent( src, dest ) { - - if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { - return; - } - - var type, i, l, - oldData = jQuery._data( src ), - curData = jQuery._data( dest, oldData ), - events = oldData.events; - - if ( events ) { - delete curData.handle; - curData.events = {}; - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - - // make the cloned public data object a copy from the original - if ( curData.data ) { - curData.data = jQuery.extend( {}, curData.data ); - } -} - -function fixCloneNodeIssues( src, dest ) { - var nodeName, e, data; - - // We do not need to do anything for non-Elements - if ( dest.nodeType !== 1 ) { - return; - } - - nodeName = dest.nodeName.toLowerCase(); - - // IE6-8 copies events bound via attachEvent when using cloneNode. 
- if ( !jQuery.support.noCloneEvent && dest[ jQuery.expando ] ) { - data = jQuery._data( dest ); - - for ( e in data.events ) { - jQuery.removeEvent( dest, e, data.handle ); - } - - // Event data gets referenced instead of copied if the expando gets copied too - dest.removeAttribute( jQuery.expando ); - } - - // IE blanks contents when cloning scripts, and tries to evaluate newly-set text - if ( nodeName === "script" && dest.text !== src.text ) { - disableScript( dest ).text = src.text; - restoreScript( dest ); - - // IE6-10 improperly clones children of object elements using classid. - // IE10 throws NoModificationAllowedError if parent is null, #12132. - } else if ( nodeName === "object" ) { - if ( dest.parentNode ) { - dest.outerHTML = src.outerHTML; - } - - // This path appears unavoidable for IE9. When cloning an object - // element in IE9, the outerHTML strategy above is not sufficient. - // If the src has innerHTML and the destination does not, - // copy the src.innerHTML into the dest.innerHTML. #10324 - if ( jQuery.support.html5Clone && ( src.innerHTML && !jQuery.trim(dest.innerHTML) ) ) { - dest.innerHTML = src.innerHTML; - } - - } else if ( nodeName === "input" && manipulation_rcheckableType.test( src.type ) ) { - // IE6-8 fails to persist the checked state of a cloned checkbox - // or radio button. 
Worse, IE6-7 fail to give the cloned element - // a checked appearance if the defaultChecked value isn't also set - - dest.defaultChecked = dest.checked = src.checked; - - // IE6-7 get confused and end up setting the value of a cloned - // checkbox/radio button to an empty string instead of "on" - if ( dest.value !== src.value ) { - dest.value = src.value; - } - - // IE6-8 fails to return the selected option to the default selected - // state when cloning options - } else if ( nodeName === "option" ) { - dest.defaultSelected = dest.selected = src.defaultSelected; - - // IE6-8 fails to set the defaultValue to the correct value when - // cloning other types of input fields - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -jQuery.each({ - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - i = 0, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone(true); - jQuery( insert[i] )[ original ]( elems ); - - // Modern browsers can apply jQuery collections as arrays, but oldIE needs a .get() - core_push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -}); - -function getAll( context, tag ) { - var elems, elem, - i = 0, - found = typeof context.getElementsByTagName !== core_strundefined ? context.getElementsByTagName( tag || "*" ) : - typeof context.querySelectorAll !== core_strundefined ? 
context.querySelectorAll( tag || "*" ) : - undefined; - - if ( !found ) { - for ( found = [], elems = context.childNodes || context; (elem = elems[i]) != null; i++ ) { - if ( !tag || jQuery.nodeName( elem, tag ) ) { - found.push( elem ); - } else { - jQuery.merge( found, getAll( elem, tag ) ); - } - } - } - - return tag === undefined || tag && jQuery.nodeName( context, tag ) ? - jQuery.merge( [ context ], found ) : - found; -} - -// Used in buildFragment, fixes the defaultChecked property -function fixDefaultChecked( elem ) { - if ( manipulation_rcheckableType.test( elem.type ) ) { - elem.defaultChecked = elem.checked; - } -} - -jQuery.extend({ - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var destElements, node, clone, i, srcElements, - inPage = jQuery.contains( elem.ownerDocument, elem ); - - if ( jQuery.support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) { - clone = elem.cloneNode( true ); - - // IE<=8 does not properly clone detached, unknown element nodes - } else { - fragmentDiv.innerHTML = elem.outerHTML; - fragmentDiv.removeChild( clone = fragmentDiv.firstChild ); - } - - if ( (!jQuery.support.noCloneEvent || !jQuery.support.noCloneChecked) && - (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) { - - // We eschew Sizzle here for performance reasons: http://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - // Fix all IE cloning issues - for ( i = 0; (node = srcElements[i]) != null; ++i ) { - // Ensure that the destination node is not null; Fixes #9587 - if ( destElements[i] ) { - fixCloneNodeIssues( node, destElements[i] ); - } - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0; (node = srcElements[i]) != null; i++ ) { - cloneCopyEvent( node, 
destElements[i] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - destElements = srcElements = node = null; - - // Return the cloned set - return clone; - }, - - buildFragment: function( elems, context, scripts, selection ) { - var j, elem, contains, - tmp, tag, tbody, wrap, - l = elems.length, - - // Ensure a safe fragment - safe = createSafeFragment( context ), - - nodes = [], - i = 0; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( jQuery.type( elem ) === "object" ) { - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || safe.appendChild( context.createElement("div") ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || ["", ""] )[1].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - - tmp.innerHTML = wrap[1] + elem.replace( rxhtmlTag, "<$1>" ) + wrap[2]; - - // Descend through wrappers to the right content - j = wrap[0]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Manually add leading whitespace removed by IE - if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) { - nodes.push( context.createTextNode( rleadingWhitespace.exec( elem )[0] ) ); - } - - // Remove IE's autoinserted from table fragments - if ( !jQuery.support.tbody ) { - - // String was a , *may* have spurious - elem = tag === "table" && !rtbody.test( elem ) ? - tmp.firstChild : - - // String was a bare or - wrap[1] === "
" && !rtbody.test( elem ) ? - tmp : - 0; - - j = elem && elem.childNodes.length; - while ( j-- ) { - if ( jQuery.nodeName( (tbody = elem.childNodes[j]), "tbody" ) && !tbody.childNodes.length ) { - elem.removeChild( tbody ); - } - } - } - - jQuery.merge( nodes, tmp.childNodes ); - - // Fix #12392 for WebKit and IE > 9 - tmp.textContent = ""; - - // Fix #12392 for oldIE - while ( tmp.firstChild ) { - tmp.removeChild( tmp.firstChild ); - } - - // Remember the top-level container for proper cleanup - tmp = safe.lastChild; - } - } - } - - // Fix #11356: Clear elements from fragment - if ( tmp ) { - safe.removeChild( tmp ); - } - - // Reset defaultChecked for any radios and checkboxes - // about to be appended to the DOM in IE 6/7 (#8060) - if ( !jQuery.support.appendChecked ) { - jQuery.grep( getAll( nodes, "input" ), fixDefaultChecked ); - } - - i = 0; - while ( (elem = nodes[ i++ ]) ) { - - // #4087 - If origin and destination elements are the same, and this is - // that element, do not do anything - if ( selection && jQuery.inArray( elem, selection ) !== -1 ) { - continue; - } - - contains = jQuery.contains( elem.ownerDocument, elem ); - - // Append to fragment - tmp = getAll( safe.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( contains ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( (elem = tmp[ j++ ]) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - tmp = null; - - return safe; - }, - - cleanData: function( elems, /* internal */ acceptData ) { - var elem, type, id, data, - i = 0, - internalKey = jQuery.expando, - cache = jQuery.cache, - deleteExpando = jQuery.support.deleteExpando, - special = jQuery.event.special; - - for ( ; (elem = elems[i]) != null; i++ ) { - - if ( acceptData || jQuery.acceptData( elem ) ) { - - id = elem[ internalKey ]; - data = id && cache[ id ]; - - if ( data ) { - if ( data.events ) { - for ( type in data.events 
) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Remove cache only if it was not already removed by jQuery.event.remove - if ( cache[ id ] ) { - - delete cache[ id ]; - - // IE does not allow us to delete expando properties from nodes, - // nor does it have a removeAttribute function on Document nodes; - // we must handle all of these cases - if ( deleteExpando ) { - delete elem[ internalKey ]; - - } else if ( typeof elem.removeAttribute !== core_strundefined ) { - elem.removeAttribute( internalKey ); - - } else { - elem[ internalKey ] = null; - } - - core_deletedIds.push( id ); - } - } - } - } - }, - - _evalUrl: function( url ) { - return jQuery.ajax({ - url: url, - type: "GET", - dataType: "script", - async: false, - global: false, - "throws": true - }); - } -}); -jQuery.fn.extend({ - wrapAll: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each(function(i) { - jQuery(this).wrapAll( html.call(this, i) ); - }); - } - - if ( this[0] ) { - // The elements to wrap the target around - var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true); - - if ( this[0].parentNode ) { - wrap.insertBefore( this[0] ); - } - - wrap.map(function() { - var elem = this; - - while ( elem.firstChild && elem.firstChild.nodeType === 1 ) { - elem = elem.firstChild; - } - - return elem; - }).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( jQuery.isFunction( html ) ) { - return this.each(function(i) { - jQuery(this).wrapInner( html.call(this, i) ); - }); - } - - return this.each(function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - }); - }, - - wrap: function( html ) { - var isFunction = jQuery.isFunction( html ); - - return this.each(function(i) { - 
jQuery( this ).wrapAll( isFunction ? html.call(this, i) : html ); - }); - }, - - unwrap: function() { - return this.parent().each(function() { - if ( !jQuery.nodeName( this, "body" ) ) { - jQuery( this ).replaceWith( this.childNodes ); - } - }).end(); - } -}); -var iframe, getStyles, curCSS, - ralpha = /alpha\([^)]*\)/i, - ropacity = /opacity\s*=\s*([^)]*)/, - rposition = /^(top|right|bottom|left)$/, - // swappable if display is none or starts with table except "table", "table-cell", or "table-caption" - // see here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rmargin = /^margin/, - rnumsplit = new RegExp( "^(" + core_pnum + ")(.*)$", "i" ), - rnumnonpx = new RegExp( "^(" + core_pnum + ")(?!px)[a-z%]+$", "i" ), - rrelNum = new RegExp( "^([+-])=(" + core_pnum + ")", "i" ), - elemdisplay = { BODY: "block" }, - - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: 0, - fontWeight: 400 - }, - - cssExpand = [ "Top", "Right", "Bottom", "Left" ], - cssPrefixes = [ "Webkit", "O", "Moz", "ms" ]; - -// return a css property mapped to a potentially vendor prefixed property -function vendorPropName( style, name ) { - - // shortcut for names that are not vendor prefixed - if ( name in style ) { - return name; - } - - // check for vendor prefixed names - var capName = name.charAt(0).toUpperCase() + name.slice(1), - origName = name, - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in style ) { - return name; - } - } - - return origName; -} - -function isHidden( elem, el ) { - // isHidden might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem ); -} - -function showHide( elements, show ) { - var display, elem, hidden, - values = [], - index = 
0, - length = elements.length; - - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - values[ index ] = jQuery._data( elem, "olddisplay" ); - display = elem.style.display; - if ( show ) { - // Reset the inline display of this element to learn if it is - // being hidden by cascaded rules or not - if ( !values[ index ] && display === "none" ) { - elem.style.display = ""; - } - - // Set elements which have been overridden with display: none - // in a stylesheet to whatever the default browser style is - // for such an element - if ( elem.style.display === "" && isHidden( elem ) ) { - values[ index ] = jQuery._data( elem, "olddisplay", css_defaultDisplay(elem.nodeName) ); - } - } else { - - if ( !values[ index ] ) { - hidden = isHidden( elem ); - - if ( display && display !== "none" || !hidden ) { - jQuery._data( elem, "olddisplay", hidden ? display : jQuery.css( elem, "display" ) ); - } - } - } - } - - // Set the display of most of the elements in a second loop - // to avoid the constant reflow - for ( index = 0; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - if ( !show || elem.style.display === "none" || elem.style.display === "" ) { - elem.style.display = show ? values[ index ] || "" : "none"; - } - } - - return elements; -} - -jQuery.fn.extend({ - css: function( name, value ) { - return jQuery.access( this, function( elem, name, value ) { - var len, styles, - map = {}, - i = 0; - - if ( jQuery.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? 
- jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - }, - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? this.show() : this.hide(); - } - - return this.each(function() { - if ( isHidden( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - }); - } -}); - -jQuery.extend({ - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "columnCount": true, - "fillOpacity": true, - "fontWeight": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: { - // normalize float css property - "float": jQuery.support.cssFloat ? 
"cssFloat" : "styleFloat" - }, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = jQuery.camelCase( name ), - style = elem.style; - - name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( style, origName ) ); - - // gets hook for the prefixed version - // followed by the unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // convert relative number strings (+= or -=) to relative numbers. #7345 - if ( type === "string" && (ret = rrelNum.exec( value )) ) { - value = ( ret[1] + 1 ) * ret[2] + parseFloat( jQuery.css( elem, name ) ); - // Fixes bug #9237 - type = "number"; - } - - // Make sure that NaN and null values aren't set. 
See: #7116 - if ( value == null || type === "number" && isNaN( value ) ) { - return; - } - - // If a number was passed in, add 'px' to the (except for certain CSS properties) - if ( type === "number" && !jQuery.cssNumber[ origName ] ) { - value += "px"; - } - - // Fixes #8908, it can be done more correctly by specifing setters in cssHooks, - // but it would mean to define eight (for every problematic property) identical functions - if ( !jQuery.support.clearCloneStyle && value === "" && name.indexOf("background") === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value, extra )) !== undefined ) { - - // Wrapped to prevent IE from throwing errors when 'invalid' values are provided - // Fixes bug #5509 - try { - style[ name ] = value; - } catch(e) {} - } - - } else { - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) { - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var num, val, hooks, - origName = jQuery.camelCase( name ); - - // Make sure that we're working with the right name - name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( elem.style, origName ) ); - - // gets hook for the prefixed version - // followed by the unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - //convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - 
val = cssNormalTransform[ name ]; - } - - // Return, converting to number if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || jQuery.isNumeric( num ) ? num || 0 : val; - } - return val; - } -}); - -// NOTE: we've included the "window" in window.getComputedStyle -// because jsdom on node.js will break without it. -if ( window.getComputedStyle ) { - getStyles = function( elem ) { - return window.getComputedStyle( elem, null ); - }; - - curCSS = function( elem, name, _computed ) { - var width, minWidth, maxWidth, - computed = _computed || getStyles( elem ), - - // getPropertyValue is only needed for .css('filter') in IE9, see #12537 - ret = computed ? computed.getPropertyValue( name ) || computed[ name ] : undefined, - style = elem.style; - - if ( computed ) { - - if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Chrome < 17 and Safari 5.0 uses "computed value" instead of "used value" for margin-right - // Safari 5.1.7 (at least) returns percentage for a larger set of values, but width seems to be reliably pixels - // this is against the CSSOM draft spec: http://dev.w3.org/csswg/cssom/#resolved-values - if ( rnumnonpx.test( ret ) && rmargin.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret; - }; -} else if ( document.documentElement.currentStyle ) { - getStyles = function( elem ) { - return elem.currentStyle; - }; - - curCSS = function( elem, name, _computed ) { - var left, rs, rsLeft, - computed = _computed 
|| getStyles( elem ), - ret = computed ? computed[ name ] : undefined, - style = elem.style; - - // Avoid setting ret to empty string here - // so we don't default to auto - if ( ret == null && style && style[ name ] ) { - ret = style[ name ]; - } - - // From the awesome hack by Dean Edwards - // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291 - - // If we're not dealing with a regular pixel number - // but a number that has a weird ending, we need to convert it to pixels - // but not position css attributes, as those are proportional to the parent element instead - // and we can't measure the parent instead because it might trigger a "stacking dolls" problem - if ( rnumnonpx.test( ret ) && !rposition.test( name ) ) { - - // Remember the original values - left = style.left; - rs = elem.runtimeStyle; - rsLeft = rs && rs.left; - - // Put in the new values to get a computed value out - if ( rsLeft ) { - rs.left = elem.currentStyle.left; - } - style.left = name === "fontSize" ? "1em" : ret; - ret = style.pixelLeft + "px"; - - // Revert the changed values - style.left = left; - if ( rsLeft ) { - rs.left = rsLeft; - } - } - - return ret === "" ? "auto" : ret; - }; -} - -function setPositiveNumber( elem, value, subtract ) { - var matches = rnumsplit.exec( value ); - return matches ? - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 1 ] - ( subtract || 0 ) ) + ( matches[ 2 ] || "px" ) : - value; -} - -function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { - var i = extra === ( isBorderBox ? "border" : "content" ) ? - // If we already have the right measurement, avoid augmentation - 4 : - // Otherwise initialize for horizontal or vertical properties - name === "width" ? 
1 : 0, - - val = 0; - - for ( ; i < 4; i += 2 ) { - // both box models exclude margin, so add it if we want it - if ( extra === "margin" ) { - val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); - } - - if ( isBorderBox ) { - // border-box includes padding, so remove it if we want content - if ( extra === "content" ) { - val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // at this point, extra isn't border nor margin, so remove border - if ( extra !== "margin" ) { - val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } else { - // at this point, extra isn't content, so add padding - val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // at this point, extra isn't content nor padding, so add border - if ( extra !== "padding" ) { - val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - return val; -} - -function getWidthOrHeight( elem, name, extra ) { - - // Start with offset property, which is equivalent to the border-box value - var valueIsBorderBox = true, - val = name === "width" ? elem.offsetWidth : elem.offsetHeight, - styles = getStyles( elem ), - isBorderBox = jQuery.support.boxSizing && jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // some non-html elements return undefined for offsetWidth, so check for null/undefined - // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285 - // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668 - if ( val <= 0 || val == null ) { - // Fall back to computed then uncomputed css if necessary - val = curCSS( elem, name, styles ); - if ( val < 0 || val == null ) { - val = elem.style[ name ]; - } - - // Computed unit is not pixels. Stop here and return. 
- if ( rnumnonpx.test(val) ) { - return val; - } - - // we need the check for style in case a browser which returns unreliable values - // for getComputedStyle silently falls back to the reliable elem.style - valueIsBorderBox = isBorderBox && ( jQuery.support.boxSizingReliable || val === elem.style[ name ] ); - - // Normalize "", auto, and prepare for extra - val = parseFloat( val ) || 0; - } - - // use the active box-sizing model to add/subtract irrelevant styles - return ( val + - augmentWidthOrHeight( - elem, - name, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles - ) - ) + "px"; -} - -// Try to determine the default display value of an element -function css_defaultDisplay( nodeName ) { - var doc = document, - display = elemdisplay[ nodeName ]; - - if ( !display ) { - display = actualDisplay( nodeName, doc ); - - // If the simple way fails, read from inside an iframe - if ( display === "none" || !display ) { - // Use the already-created iframe if possible - iframe = ( iframe || - jQuery("