diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000..03ce0b43fa --- /dev/null +++ b/.clang-format @@ -0,0 +1,149 @@ +--- +Language: Cpp +# BasedOnStyle: LLVM +AccessModifierOffset: -2 +AlignAfterOpenBracket: Align +AlignConsecutiveMacros: false +AlignConsecutiveAssignments: false +AlignConsecutiveBitFields: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlines: Right +AlignOperands: Align +AlignTrailingComments: true +AllowAllArgumentsOnNextLine: true +AllowAllConstructorInitializersOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortEnumsOnASingleLine: true +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: All +AllowShortLambdasOnASingleLine: All +AllowShortIfStatementsOnASingleLine: Never +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: MultiLine +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterCaseLabel: false + AfterClass: false + AfterControlStatement: Never + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeInheritanceComma: false +BreakInheritanceList: BeforeColon +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakConstructorInitializers: BeforeColon +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DeriveLineEnding: true +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +ForEachMacros: + - foreach + - Q_FOREACH + - BOOST_FOREACH +IncludeBlocks: Preserve +IncludeCategories: + - Regex: '^"(llvm|llvm-c|clang|clang-c)/' + Priority: 2 + SortPriority: 0 + - Regex: '^(<|"(gtest|gmock|isl|json)/)' + Priority: 3 + SortPriority: 0 + - Regex: '.*' + Priority: 1 + SortPriority: 0 +IncludeIsMainRegex: '(Test)?$' +IncludeIsMainSourceRegex: '' +IndentCaseLabels: false +IndentCaseBlocks: false +IndentGotoLabels: true +IndentPPDirectives: None +IndentExternBlock: AfterExternBlock +IndentWidth: 2 +IndentWrappedFunctionNames: false +InsertTrailingCommas: None +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: true +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBinPackProtocolList: Auto +ObjCBlockIndentWidth: 2 +ObjCBreakBeforeNestedBlockParam: true +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 19 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 60 +PointerAlignment: Right +ReflowComments: true +SortIncludes: true +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: 
false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyBlock: false +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInConditionalStatement: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +SpaceBeforeSquareBrackets: false +Standard: Latest +StatementMacros: + - Q_UNUSED + - QT_REQUIRE_VERSION +TabWidth: 8 +UseCRLF: false +UseTab: Never +WhitespaceSensitiveMacros: + - STRINGIZE + - PP_STRINGIZE + - BOOST_PP_STRINGIZE +... + diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml new file mode 100644 index 0000000000..4421cc7e6d --- /dev/null +++ b/.github/workflows/format.yml @@ -0,0 +1,29 @@ +name: clang-format + +on: [push, pull_request] + +jobs: + format: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - uses: cvmfs-contrib/github-action-cvmfs@v2 + - name: Start container + run: | + docker run -it --name CI_container -v ${GITHUB_WORKSPACE}:/Package -v /cvmfs:/cvmfs:shared -d ghcr.io/aidasoft/centos7:latest /bin/bash + - name: Add upstream + run: | + docker exec CI_container /bin/bash -c 'cd ./Package + git remote add upstream https://github.com/HEP-FCC/FCCAnalyses.git + git fetch upstream' + - name: Run formatter + run: | + docker exec CI_container /bin/bash -c 'cd ./Package + source /cvmfs/sft.cern.ch/lcg/contrib/clang/14.0.6/x86_64-centos7/setup.sh + git clang-format --style=file $(git merge-base upstream/master HEAD)' + - name: Check cleanliness + run: | + docker exec CI_container /bin/bash -c 'cd ./Package + git diff' diff --git a/.gitignore b/.gitignore index 0b28a4e7ae..eed87bf725 100644 --- a/.gitignore +++ b/.gitignore @@ -38,6 +38,7 @@ MANIFEST # Output outputs/ +BatchOutputs/ # PyInstaller # Usually these files are written by a python script from a template @@ -96,3 +97,6 @@ venv.bak/ # Benchmarking benchmark*json + +# Local configuration +.fccana/* diff --git a/.zenodo.json b/.zenodo.json index 18ce474087..a4efdc202a 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -1,7 +1,7 @@ { "creators": [ { - "affiliation": "CERN", + "affiliation": "Ecole Polytechnique Fédérale de Lausanne", "name": "Helsens, Clement", "orcid": "0000-0002-9243-7554" }, diff --git a/CMakeLists.txt b/CMakeLists.txt index 60e572dc39..0df276bd99 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -14,7 +14,7 @@ include(CTest) #--- options ------------------------------------------------------------------ -set(WITH_ACTS AUTO CACHE STRING "Build analyzers that need Acts") +set(WITH_ACTS OFF CACHE STRING "Build analyzers that need Acts") set_property(CACHE WITH_ACTS PROPERTY STRINGS AUTO ON OFF) set(WITH_DD4HEP AUTO CACHE STRING "Build analyzers that need DD4hep") @@ -62,6 +62,8 @@ find_package(podio) set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake ${CMAKE_MODULE_PATH}) find_package(FastJet) +find_package( Delphes REQUIRED ) + if(WITH_ACTS) find_package( Acts COMPONENTS Core ) @@ -88,6 +90,17 @@ if(WITH_DD4HEP) endif() endif() +if(WITH_ONNX) + find_package(ONNXRuntime) + if(ONNXRuntime_FOUND) + + elseif(WITH_ONNX STREQUAL AUTO) + message(WARNING "ONNXRuntime not found. 
Skipping ONNX-dependent analyzers.") + set(WITH_ONNX OFF) + else() + message(FATAL_ERROR "Failed to locate ONNXRuntime!") + endif() +endif() if(WITH_ONNX AND BUILD_TESTING) # currently these files are only needed by ONNX-parts # Grab the test files into a cached directory @@ -122,22 +135,6 @@ file(GLOB _run_python_files config/*.py) install(FILES ${_run_python_files} DESTINATION ${CMAKE_INSTALL_PREFIX}/python/config) install(FILES config/doPlots.py PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ DESTINATION ${CMAKE_INSTALL_PREFIX}/python/config) -#--- Awkward setup ------------------------------------------------------------ - -execute_process(COMMAND python -m awkward.config --incdir OUTPUT_VARIABLE _AWKWARD_INCLUDE OUTPUT_STRIP_TRAILING_WHITESPACE) -set(AWKWARD_INCLUDE ${_AWKWARD_INCLUDE}) -execute_process(COMMAND python -m awkward.config --libdir OUTPUT_VARIABLE _AWKWARD_LIBRARIES OUTPUT_STRIP_TRAILING_WHITESPACE) -set(AWKWARD_LIBRARIES ${_AWKWARD_LIBRARIES}) - -include_directories(BEFORE "${AWKWARD_INCLUDE}") - -find_library(CPU-KERNELS awkward-cpu-kernels REQUIRED HINTS ${AWKWARD_LIBRARIES}) -find_library(LIBAWKWARD awkward REQUIRED HINTS ${AWKWARD_LIBRARIES}) -find_library(LIBDL dl REQUIRED) - -message(STATUS "Libraries: ${CPU-KERNELS} ${LIBAWKWARD} ${LIBDL}") -message(STATUS "includes--------------------- main: ${AWKWARD_INCLUDE}") -message(STATUS "Library---------------------- main: ${AWKWARD_LIBRARIES}") #--- Descend into subdirectories ---------------------------------------------- diff --git a/README.md b/README.md index c36d550961..82bb375b1d 100644 --- a/README.md +++ b/README.md @@ -10,11 +10,11 @@ files and producing the plots. > > To have access to the FCC samples, you need to be subscribed to one of the > following e-groups (with owner approval) `fcc-eos-read-xx` with `xx=ee,hh,eh`. -> For the time being, the configuration files are accessible on `fccsw` public -> AFS. This is not optimal and will be changed in the future with migration to DIRAC, thus you are -> also kindly asked to contact `emmanuel.perez@cern.ch`, `gerardo.ganis@cern.ch`, `clement.helsens@cern.ch` and request access to -> `/afs/cern.ch/work/f/fccsw/public/FCCDicts/`. -> +> The configuration files are accessible at `/afs/cern.ch/work/f/fccsw/public/FCCDicts/` with a mirror at `/cvmfs/fcc.cern.ch/FCCDicts/`. +> For accessing/reading information about existing datasets you do not need special rights. +> However, if you need new datasets, you are invited to contact `emmanuel.perez@cern.ch`, `gerardo.ganis@cern.ch` or `juraj.smiesko@cern.ch` +> who will explain the procedure, including granting the required access, where relevant. +> Detailed code documentation can be found [here](http://hep-fcc.github.io/FCCAnalyses/doc/latest/index.html). @@ -31,6 +31,8 @@ Detailed code documentation can be found * [Pre-selection](#pre-selection) * [Final selection](#final-selection) * [Plotting](#plotting) + * [Contributing](#contributing) + * [Formatting](#code-formatting) ## RootDataFrame based @@ -46,9 +48,18 @@ ROOT dataframe documentation is available ## Getting started -In order to use the FCC analysers within ROOT dataframe, a dictionary needs to -be built and put into `LD_LIBRARY_PATH` (this happens in `setup.sh`). The -following needs to be done when running local code and for developers. +In order to use the FCC analyzers within ROOT RDataFrame, a dictionary needs to +be built and put into `LD_LIBRARY_PATH`.
In order to build and load FCCAnalyses +with default options one needs to run the following two commands: + +```shell +source ./setup.sh +fccanalysis build +``` + +FCCAnalyses is a CMake-based project and any customizations can be provided +in classic CMake style; the following commands are equivalent to the default +version of FCCAnalyses: ```shell source ./setup.sh @@ -63,6 +74,21 @@ cd .. > Each time changes are made in the C++ code, for example in > `analyzers/dataframe/` please do not forget to re-compile :) > +> To cleanly recompile the default version of FCCAnalyses one can use +> `fccanalysis build --clean-build`. + +In order to keep developing an analysis against a well-defined Key4hep stack, +the sub-command `fccanalysis pin` is provided. One can pin the analysis with +``` +source setup.sh +fccanalysis pin +``` + +To remove the pin, run +``` +fccanalysis pin --clear +``` ## Generalities @@ -120,7 +146,7 @@ where `p8_ee_ZH_ecm240` should match an existing sample in the database, of output files) and `output` in case you need to change the name of the output file (please note that then the sample will not be matched in the database for `finalSel.py` histograms normalisation). The other parameters are explained in -[the example file](https://github.com/HEP-FCC/FCCAnalyses/tree/master/example/FCCee/higgs/mH-recoil/analysis_stage1.py). +[the example file](https://github.com/HEP-FCC/FCCAnalyses/blob/master/examples/FCCee/higgs/mH-recoil/mumu/analysis_stage1.py). To run the pre-selection stage of the example analysis run: @@ -195,3 +221,25 @@ Resulting plots will be located the `outdir` defined in the analysis file. In an attempt to ease the development of new physics case studies, such as for the [FCCee physics performance](https://github.com/HEP-FCC/FCCeePhysicsPerformance) cases, a new experimental analysis package creation tool is introduced. [See here](case-studies/README.md) for more details. + + +## Contributing + +### Code formatting + +The preferred style of the C++ code in FCCAnalyses is LLVM, which is checked +by a CI job. + +Currently `clang-format` is not available in the Key4hep stack, but one can +obtain a suitable version of it from CVMFS thanks to LCG: +``` +source /cvmfs/sft.cern.ch/lcg/contrib/clang/14.0.6/x86_64-centos7/setup.sh +``` + +Then to apply formatting to a given file: +``` +clang-format -i -style=file /path/to/file.cpp +``` + +Another way to obtain a recent version of `clang-format` is through downloading the +[Key4hep Spack instance](https://key4hep.github.io/key4hep-doc/spack-build-instructions-for-librarians/spack-setup.html#downloading-a-spack-instance).
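The formatting check performed by the new `format.yml` workflow can also be reproduced locally before pushing. The sketch below simply chains the commands already used in the workflow and in this README; the `upstream` remote name is an assumption, and any remote pointing at `HEP-FCC/FCCAnalyses` works:

```shell
# one-time setup: make the main repository available as a remote (remote name assumed)
git remote add upstream https://github.com/HEP-FCC/FCCAnalyses.git
git fetch upstream

# pick up clang-format 14 from CVMFS/LCG, as the CI job does
source /cvmfs/sft.cern.ch/lcg/contrib/clang/14.0.6/x86_64-centos7/setup.sh

# reformat only the lines changed since branching off upstream/master
git clang-format --style=file $(git merge-base upstream/master HEAD)

# an empty diff here means the CI formatting check should pass
git diff
```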
diff --git a/addons/FastJet/python/jetClusteringHelper.py b/addons/FastJet/python/jetClusteringHelper.py new file mode 100644 index 0000000000..5618974006 --- /dev/null +++ b/addons/FastJet/python/jetClusteringHelper.py @@ -0,0 +1,90 @@ +import json +import ROOT + + +class ExclusiveJetClusteringHelper: + def __init__(self, coll, njets, tag=""): + + self.input_coll = coll + self.njets = njets + + self.tag = tag + if tag != "": + self.tag = "_{}".format(tag) + + part_px = "part{}_px".format(self.tag) + part_py = "part{}_py".format(self.tag) + part_pz = "part{}_pz".format(self.tag) + part_e = "part{}_e".format(self.tag) + part_m = "part{}_m".format(self.tag) + part_q = "part{}_q".format(self.tag) + + pjetc = "pjetc{}".format(self.tag) + + _jet = "_jet{}".format(self.tag) + jet = "jet{}".format(self.tag) + _jetc = "_jetc{}".format(self.tag) + jetc = "jetc{}".format(self.tag) + + # compute jet observables + + observables = ["p", "e", "mass", "phi", "theta", "nconst"] + + self.jet_obs = dict() + for obs in observables: + self.jet_obs[obs] = "jet_{}{}".format(obs, self.tag) + event_njet = "event_njet{}".format(self.tag) + + self.jets = jet + self.constituents = jetc + + self.definition = dict() + + # get single particle properties + self.definition[part_px] = "ReconstructedParticle::get_px({})".format(self.input_coll) + self.definition[part_py] = "ReconstructedParticle::get_py({})".format(self.input_coll) + self.definition[part_pz] = "ReconstructedParticle::get_pz({})".format(self.input_coll) + self.definition[part_e] = "ReconstructedParticle::get_e({})".format(self.input_coll) + self.definition[part_m] = "ReconstructedParticle::get_mass({})".format(self.input_coll) + self.definition[part_q] = "ReconstructedParticle::get_charge({})".format(self.input_coll) + + # form fastjet pseudo jets + self.definition[pjetc] = "JetClusteringUtils::set_pseudoJets({}, {}, {}, {})".format( + part_px, part_py, part_pz, part_e + ) + + # run jet clustering with all reconstructed particles. 
ee_kt_algorithm, R=1.5, inclusive clustering, E-scheme + self.definition[_jet] = "JetClustering::clustering_ee_kt(2, {}, 1, 0)({})".format(njets, pjetc) + + # get the jets out of the struct + self.definition[jet] = "JetClusteringUtils::get_pseudoJets({})".format(_jet) + + # get the jets constituents out of the struct + self.definition[_jetc] = "JetClusteringUtils::get_constituents({})".format(_jet) + + # get constituents + self.definition[jetc] = "JetConstituentsUtils::build_constituents_cluster({}, {})".format( + self.input_coll, _jetc + ) + + # compute jet observables + self.definition[self.jet_obs["p"]] = "JetClusteringUtils::get_p({})".format(self.jets) + self.definition[self.jet_obs["e"]] = "JetClusteringUtils::get_e({})".format(self.jets) + self.definition[self.jet_obs["mass"]] = "JetClusteringUtils::get_m({})".format(self.jets) + self.definition[self.jet_obs["phi"]] = "JetClusteringUtils::get_phi({})".format(self.jets) + self.definition[self.jet_obs["theta"]] = "JetClusteringUtils::get_theta({})".format(self.jets) + self.definition[self.jet_obs["nconst"]] = "JetConstituentsUtils::count_consts({})".format(self.constituents) + self.definition[event_njet] = "JetConstituentsUtils::count_jets({})".format(self.constituents) + + def define(self, df): + + for var, call in self.definition.items(): + df = df.Define(var, call) + + return df + + def outputBranches(self): + + out = list(self.jet_obs.values()) + out += [obs for obs in self.definition.keys() if "event_" in obs] + return out diff --git a/addons/ONNXRuntime/CMakeLists.txt b/addons/ONNXRuntime/CMakeLists.txt index 46eefcb2c2..12cf7961e7 100644 --- a/addons/ONNXRuntime/CMakeLists.txt +++ b/addons/ONNXRuntime/CMakeLists.txt @@ -40,5 +40,5 @@ if(BUILD_TESTING) ) endif() # add all integration tests - add_integration_test("examples/FCCee/test/weaver_inference.py") + add_integration_test("examples/FCCee/weaver/analysis_inference.py") endif() diff --git a/addons/ONNXRuntime/python/jetFlavourHelper.py b/addons/ONNXRuntime/python/jetFlavourHelper.py new file mode 100644 index 0000000000..d40b67e519 --- /dev/null +++ b/addons/ONNXRuntime/python/jetFlavourHelper.py @@ -0,0 +1,284 @@ +import json +import ROOT +import sys + + +class JetFlavourHelper: + def __init__(self, coll, jet, jetc, tag=""): + + self.jet = jet + self.const = jetc + + self.tag = tag + if tag != "": + self.tag = "_{}".format(tag) + + self.particle = coll["GenParticles"] + self.pfcand = coll["PFParticles"] + self.pftrack = coll["PFTracks"] + self.pfphoton = coll["PFPhotons"] + self.pfnh = coll["PFNeutralHadrons"] + self.trackstate = coll["TrackState"] + self.trackerhits = coll["TrackerHits"] + self.calohits = coll["CalorimeterHits"] + self.dndx = coll["dNdx"] + self.l = coll["PathLength"] + self.bz = coll["Bz"] + + self.definition = dict() + + # ===== VERTEX + # MC primary vertex + self.definition["pv{}".format(self.tag)] = "FCCAnalyses::MCParticle::get_EventPrimaryVertexP4()( {} )".format( + self.particle + ) + + # build jet constituents lists + self.definition["pfcand_isMu{}".format(self.tag)] = "JetConstituentsUtils::get_isMu({})".format(self.const) + self.definition["pfcand_isEl{}".format(self.tag)] = "JetConstituentsUtils::get_isEl({})".format(self.const) + self.definition["pfcand_isChargedHad{}".format(self.tag)] = "JetConstituentsUtils::get_isChargedHad({})".format( + self.const + ) + self.definition["pfcand_isGamma{}".format(self.tag)] = "JetConstituentsUtils::get_isGamma({})".format( + self.const + ) + self.definition["pfcand_isNeutralHad{}".format(self.tag)] = 
"JetConstituentsUtils::get_isNeutralHad({})".format( + self.const + ) + + # kinematics, displacement, PID + self.definition["pfcand_e{}".format(self.tag)] = "JetConstituentsUtils::get_e({})".format(self.const) + self.definition["pfcand_p{}".format(self.tag)] = "JetConstituentsUtils::get_p({})".format(self.const) + self.definition["pfcand_theta{}".format(self.tag)] = "JetConstituentsUtils::get_theta({})".format(self.const) + self.definition["pfcand_phi{}".format(self.tag)] = "JetConstituentsUtils::get_phi({})".format(self.const) + self.definition["pfcand_charge{}".format(self.tag)] = "JetConstituentsUtils::get_charge({})".format(self.const) + self.definition["pfcand_type{}".format(self.tag)] = "JetConstituentsUtils::get_type({})".format(self.const) + self.definition["pfcand_erel{}".format(self.tag)] = "JetConstituentsUtils::get_erel_cluster({}, {})".format( + jet, self.const + ) + + self.definition[ + "pfcand_erel_log{}".format(self.tag) + ] = "JetConstituentsUtils::get_erel_log_cluster({}, {})".format(jet, self.const) + + self.definition[ + "pfcand_thetarel{}".format(self.tag) + ] = "JetConstituentsUtils::get_thetarel_cluster({}, {})".format(jet, self.const) + + self.definition["pfcand_phirel{}".format(self.tag)] = "JetConstituentsUtils::get_phirel_cluster({}, {})".format( + jet, self.const + ) + + self.definition[ + "pfcand_dndx{}".format(self.tag) + ] = "JetConstituentsUtils::get_dndx({}, {}, {}, pfcand_isChargedHad{})".format( + self.const, self.dndx, self.pftrack, self.tag + ) + + self.definition[ + "pfcand_mtof{}".format(self.tag) + ] = "JetConstituentsUtils::get_mtof({}, {}, {}, {}, {}, {}, {}, pv{})".format( + self.const, self.l, self.pftrack, self.trackerhits, self.pfphoton, self.pfnh, self.calohits, self.tag + ) + + self.definition["Bz{}".format(self.tag)] = "{}[0]".format(self.bz) + + self.definition[ + "pfcand_dxy{}".format(self.tag) + ] = "JetConstituentsUtils::XPtoPar_dxy({}, {}, pv{}, Bz{})".format( + self.const, self.trackstate, self.tag, self.tag + ) + + self.definition["pfcand_dz{}".format(self.tag)] = "JetConstituentsUtils::XPtoPar_dz({}, {}, pv{}, Bz{})".format( + self.const, self.trackstate, self.tag, self.tag + ) + + self.definition[ + "pfcand_phi0{}".format(self.tag) + ] = "JetConstituentsUtils::XPtoPar_phi({}, {}, pv{}, Bz{})".format( + self.const, self.trackstate, self.tag, self.tag + ) + + self.definition["pfcand_C{}".format(self.tag)] = "JetConstituentsUtils::XPtoPar_C({}, {}, Bz{})".format( + self.const, self.trackstate, self.tag + ) + + self.definition["pfcand_ct{}".format(self.tag)] = "JetConstituentsUtils::XPtoPar_ct({}, {}, Bz{})".format( + self.const, self.trackstate, self.tag + ) + + self.definition["pfcand_dptdpt{}".format(self.tag)] = "JetConstituentsUtils::get_omega_cov({}, {})".format( + self.const, self.trackstate + ) + + self.definition["pfcand_dxydxy{}".format(self.tag)] = "JetConstituentsUtils::get_d0_cov({}, {})".format( + self.const, self.trackstate + ) + + self.definition["pfcand_dzdz{}".format(self.tag)] = "JetConstituentsUtils::get_z0_cov({}, {})".format( + self.const, self.trackstate + ) + + self.definition["pfcand_dphidphi{}".format(self.tag)] = "JetConstituentsUtils::get_phi0_cov({}, {})".format( + self.const, self.trackstate + ) + + self.definition[ + "pfcand_detadeta{}".format(self.tag) + ] = "JetConstituentsUtils::get_tanlambda_cov({}, {})".format(self.const, self.trackstate) + + self.definition["pfcand_dxydz{}".format(self.tag)] = "JetConstituentsUtils::get_d0_z0_cov({}, {})".format( + self.const, self.trackstate + ) + + 
self.definition["pfcand_dphidxy{}".format(self.tag)] = "JetConstituentsUtils::get_phi0_d0_cov({}, {})".format( + self.const, self.trackstate + ) + + self.definition["pfcand_phidz{}".format(self.tag)] = "JetConstituentsUtils::get_phi0_z0_cov({}, {})".format( + self.const, self.trackstate + ) + + self.definition[ + "pfcand_phictgtheta{}".format(self.tag) + ] = "JetConstituentsUtils::get_tanlambda_phi0_cov({}, {})".format(self.const, self.trackstate) + + self.definition[ + "pfcand_dxyctgtheta{}".format(self.tag) + ] = "JetConstituentsUtils::get_tanlambda_d0_cov({}, {})".format(self.const, self.trackstate) + + self.definition[ + "pfcand_dlambdadz{}".format(self.tag) + ] = "JetConstituentsUtils::get_tanlambda_z0_cov({}, {})".format(self.const, self.trackstate) + + self.definition[ + "pfcand_cctgtheta{}".format(self.tag) + ] = "JetConstituentsUtils::get_omega_tanlambda_cov({}, {})".format(self.const, self.trackstate) + + self.definition["pfcand_phic{}".format(self.tag)] = "JetConstituentsUtils::get_omega_phi0_cov({}, {})".format( + self.const, self.trackstate + ) + + self.definition["pfcand_dxyc{}".format(self.tag)] = "JetConstituentsUtils::get_omega_d0_cov({}, {})".format( + self.const, self.trackstate + ) + + self.definition["pfcand_cdz{}".format(self.tag)] = "JetConstituentsUtils::get_omega_z0_cov({}, {})".format( + self.const, self.trackstate + ) + + self.definition[ + "pfcand_btagSip2dVal{}".format(self.tag) + ] = "JetConstituentsUtils::get_Sip2dVal_clusterV({}, pfcand_dxy{}, pfcand_phi0{}, Bz{})".format( + jet, self.tag, self.tag, self.tag + ) + + self.definition[ + "pfcand_btagSip2dSig{}".format(self.tag) + ] = "JetConstituentsUtils::get_Sip2dSig(pfcand_btagSip2dVal{}, pfcand_dxydxy{})".format(self.tag, self.tag) + + self.definition[ + "pfcand_btagSip3dVal{}".format(self.tag) + ] = "JetConstituentsUtils::get_Sip3dVal_clusterV({}, pfcand_dxy{}, pfcand_dz{}, pfcand_phi0{}, Bz{})".format( + jet, self.tag, self.tag, self.tag, self.tag + ) + + self.definition[ + "pfcand_btagSip3dSig{}".format(self.tag) + ] = "JetConstituentsUtils::get_Sip3dSig(pfcand_btagSip3dVal{}, pfcand_dxydxy{}, pfcand_dzdz{})".format( + self.tag, self.tag, self.tag + ) + + self.definition[ + "pfcand_btagJetDistVal{}".format(self.tag) + ] = "JetConstituentsUtils::get_JetDistVal_clusterV({}, {}, pfcand_dxy{}, pfcand_dz{}, pfcand_phi0{}, Bz{})".format( + jet, self.const, self.tag, self.tag, self.tag, self.tag + ) + + self.definition[ + "pfcand_btagJetDistSig{}".format(self.tag) + ] = "JetConstituentsUtils::get_JetDistSig(pfcand_btagJetDistVal{}, pfcand_dxydxy{}, pfcand_dzdz{})".format( + self.tag, self.tag, self.tag + ) + + self.definition["jet_nmu{}".format(self.tag)] = "JetConstituentsUtils::count_type(pfcand_isMu{})".format( + self.tag + ) + self.definition["jet_nel{}".format(self.tag)] = "JetConstituentsUtils::count_type(pfcand_isEl{})".format( + self.tag + ) + self.definition[ + "jet_nchad{}".format(self.tag) + ] = "JetConstituentsUtils::count_type(pfcand_isChargedHad{})".format(self.tag) + self.definition["jet_ngamma{}".format(self.tag)] = "JetConstituentsUtils::count_type(pfcand_isGamma{})".format( + self.tag + ) + self.definition[ + "jet_nnhad{}".format(self.tag) + ] = "JetConstituentsUtils::count_type(pfcand_isNeutralHad{})".format(self.tag) + + def define(self, df): + + for var, call in self.definition.items(): + df = df.Define(var, call) + + return df + + def inference(self, jsonCfg, onnxCfg, df): + + ## extract input variables/score name and ordering from json file + initvars, self.variables, self.scores = [], 
[], [] + f = open(jsonCfg) + data = json.load(f) + + for varname in data["pf_features"]["var_names"]: + initvars.append(varname) + self.variables.append("{}{}".format(varname, self.tag)) + + for varname in data["pf_vectors"]["var_names"]: + initvars.append(varname) + self.variables.append("{}{}".format(varname, self.tag)) + + for scorename in data["output_names"]: + # self.scores.append(scorename) + # self.scores.append(scorename.replace("jet", "jet{}".format(self.tag))) + self.scores.append("{}{}".format(scorename, self.tag)) + + f.close() + # convert to tuple + initvars = tuple(initvars) + + # then funcs + for varname in self.variables: + matches = [obs for obs in self.definition.keys() if obs == varname] + if len(matches) != 1: + print("ERROR: {} variables was not defined.".format(varname)) + sys.exit() + + self.get_weight_str = "JetFlavourUtils::get_weights(rdfslot_, " + for var in self.variables: + self.get_weight_str += "{},".format(var) + self.get_weight_str = "{})".format(self.get_weight_str[:-1]) + + from ROOT import JetFlavourUtils + + weaver = JetFlavourUtils.setup_weaver( + onnxCfg, # name of the trained model exported + jsonCfg, # .json file produced by weaver during training + initvars, + ROOT.GetThreadPoolSize() if ROOT.GetThreadPoolSize() > 0 else 1, + ) + + # run inference and cast scores + df = df.Define("MVAVec_{}".format(self.tag), self.get_weight_str) + + for i, scorename in enumerate(self.scores): + df = df.Define(scorename, "JetFlavourUtils::get_weight(MVAVec_{}, {})".format(self.tag, i)) + + return df + + def outputBranches(self): + + out = self.scores + out += [obs for obs in self.definition.keys() if "jet_" in obs] + return out diff --git a/addons/ONNXRuntime/test/onnxtest.cpp b/addons/ONNXRuntime/test/onnxtest.cpp index 04f60c07d9..f9ad7b8b26 100644 --- a/addons/ONNXRuntime/test/onnxtest.cpp +++ b/addons/ONNXRuntime/test/onnxtest.cpp @@ -11,9 +11,10 @@ TEST_CASE("flavtagging", "[onnx]") { FCCAnalyses::JetFlavourUtils::setup_weaver(TEST_FILE("fccee_flavtagging_dummy.onnx"), TEST_FILE("preprocess.json"), - {"pfcand_e", "pfcand_theta", "pfcand_phi", "pfcand_pid", "pfcand_charge"}); + {"pfcand_e", "pfcand_theta", "pfcand_phi", "pfcand_pid", "pfcand_charge"}, 1); - const auto out = FCCAnalyses::JetFlavourUtils::compute_weights( + unsigned int slot = 0; + const auto out = FCCAnalyses::JetFlavourUtils::compute_weights(slot, {{{1.38285, 19.3685}}, {{1.97631, 1.7312}}, {{-1.50803, -1.36646}}, {{0, 0}}, {{1, -1}}}); REQUIRE(out.size() == 1); // single jet -> single collection of weights diff --git a/analyzers/dataframe/CMakeLists.txt b/analyzers/dataframe/CMakeLists.txt index 54141b8fe9..1c5cfb32d3 100644 --- a/analyzers/dataframe/CMakeLists.txt +++ b/analyzers/dataframe/CMakeLists.txt @@ -4,17 +4,21 @@ find_package(Vdt) -message(STATUS "includes-------------------------- dataframe awkward: ${AWKWARD_INCLUDE}") message(STATUS "includes-------------------------- dataframe edm4hep: ${EDM4HEP_INCLUDE_DIRS}") message(STATUS "includes-------------------------- dataframe podio : ${podio_INCLUDE_DIR}") +message(STATUS "includes-------------------------- dataframe delphes: ${DELPHES_INCLUDE_DIR}") +message(STATUS "includes-------------------------- dataframe delphes EXt TrkCov: ${DELPHES_EXTERNALS_TKCOV_INCLUDE_DIR}") +message(STATUS "includes-------------------------- dataframe delphes EXt: ${DELPHES_EXTERNALS_INCLUDE_DIR}") + +include_directories(${DELPHES_INCLUDE_DIR} + ${DELPHES_EXTERNALS_INCLUDE_DIR} + ${DELPHES_EXTERNALS_TKCOV_INCLUDE_DIR} + ) file(GLOB sources src/*.cc) 
file(GLOB headers RELATIVE ${CMAKE_CURRENT_LIST_DIR} FCCAnalyses/*.h) -message(STATUS "includes headers ${headers}") -message(STATUS "includes sources ${sources}") - list(FILTER headers EXCLUDE REGEX "LinkDef.h") if(NOT WITH_DD4HEP) list(FILTER headers EXCLUDE REGEX "CaloNtupleizer.h") @@ -23,6 +27,8 @@ endif() if(NOT WITH_ONNX) list(FILTER headers EXCLUDE REGEX "JetFlavourUtils.h") list(FILTER sources EXCLUDE REGEX "JetFlavourUtils.cc") + list(FILTER headers EXCLUDE REGEX "WeaverUtils.h") + list(FILTER sources EXCLUDE REGEX "WeaverUtils.cc") endif() if(NOT WITH_ACTS) @@ -32,6 +38,8 @@ if(NOT WITH_ACTS) list(FILTER sources EXCLUDE REGEX "VertexFinderActs.cc") endif() +message(STATUS "includes headers ${headers}") +message(STATUS "includes sources ${sources}") message(STATUS "CMAKE_CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}") message(STATUS "CMAKE_INSTALL_INCLUDEDIR ${CMAKE_INSTALL_INCLUDEDIR}") @@ -42,9 +50,13 @@ target_include_directories(FCCAnalyses PUBLIC $ $ ${VDT_INCLUDE_DIR} - ${AWKWARD_INCLUDE} + ${DELPHES_INCLUDE_DIR} + ${DELPHES_EXTERNALS_INCLUDE_DIR} + ${DELPHES_EXTERNALS_TKCOV_INCLUDE_DIR} ) +message(STATUS " ====== DELPHES LIBRARY = " ${DELPHES_LIBRARY} ) +message(STATUS " ====== DELPHES_EXTERNALS_TKCOV_INCLUDE_DIR = " ${DELPHES_EXTERNALS_TKCOV_INCLUDE_DIR} ) target_link_libraries(FCCAnalyses @@ -55,10 +67,7 @@ target_link_libraries(FCCAnalyses EDM4HEP::edm4hep EDM4HEP::edm4hepDict podio::podio - #${AWKWARD_LIBRARIES} - ${LIBAWKWARD} - ${CPU-KERNELS} - ${LIBDL} + ${DELPHES_LIBRARY} ${ADDONS_LIBRARIES} gfortran # todo: why necessary? ) diff --git a/analyzers/dataframe/FCCAnalyses/Algorithms.h b/analyzers/dataframe/FCCAnalyses/Algorithms.h index 19e6eb58e1..bd1aea06f7 100644 --- a/analyzers/dataframe/FCCAnalyses/Algorithms.h +++ b/analyzers/dataframe/FCCAnalyses/Algorithms.h @@ -6,6 +6,8 @@ #include "edm4hep/ReconstructedParticleData.h" +#include "FastJet/JetClustering.h" + //#include "TFitter.h" #include "Math/Minimizer.h" #include "ROOT/RVec.hxx" @@ -189,6 +191,20 @@ namespace Algorithms{ /// Get the invariant mass from a list of reconstructed particles float getMass(const ROOT::VecOps::RVec & in); + /// make "jets" by splitting the events into two hemisphere transverse to the thrust axis. + struct jets_TwoHemispheres { + int m_sorted=0; ///< pT ordering=0, E ordering=1 + int m_recombination = 0; ///< E_scheme=0, pt_scheme=1, pt2_scheme=2, Et_scheme=3, Et2_scheme=4, BIpt_scheme=5, BIpt2_scheme=6, E0_scheme=10, p_scheme=11 + jets_TwoHemispheres( int arg_sorted, int arg_recombination ) ; + JetClustering::FCCAnalysesJet operator() ( + const ROOT::VecOps::RVec & RP_px, + const ROOT::VecOps::RVec & RP_py, + const ROOT::VecOps::RVec & RP_pz, + const ROOT::VecOps::RVec & RP_e, + const ROOT::VecOps::RVec & RP_costheta ) ; + } ; + + ///@} }//end NS Algorithms diff --git a/analyzers/dataframe/FCCAnalyses/JetClusteringUtils.h b/analyzers/dataframe/FCCAnalyses/JetClusteringUtils.h index c0c7aa462c..7e3cd584a7 100644 --- a/analyzers/dataframe/FCCAnalyses/JetClusteringUtils.h +++ b/analyzers/dataframe/FCCAnalyses/JetClusteringUtils.h @@ -78,12 +78,23 @@ namespace FCCAnalyses { /** Get jet eta. Details. */ ROOT::VecOps::RVec get_eta(const ROOT::VecOps::RVec& in); - /** Get jet phi. Details. */ + /** Get jet phi. Details (range [0,2*pi]). */ ROOT::VecOps::RVec get_phi(const ROOT::VecOps::RVec& in); + /** Get jet phi. Details (range [-pi,pi]). */ + ROOT::VecOps::RVec get_phi_std(const ROOT::VecOps::RVec& in); + + /** Get jet theta. Details. 
*/ ROOT::VecOps::RVec get_theta(const ROOT::VecOps::RVec& in); + ///Select clustered jets with transverse momentum greader than a minimum value [GeV] + struct sel_pt { + sel_pt(float arg_min_pt); + float m_min_pt = 1.; //> transverse momentum threshold [GeV] + ROOT::VecOps::RVec operator() (ROOT::VecOps::RVec in); + }; + ///Internal methods JetClustering::FCCAnalysesJet initialise_FCCAnalysesJet(); @@ -99,6 +110,19 @@ namespace FCCAnalyses { std::vector exclusive_dmerge(fastjet::ClusterSequence& cs, int do_dmarge_max); + // build the resonance from 2 objects. Keep the closest to the mass given as input + struct resonanceBuilder { + float m_resonance_mass; + resonanceBuilder(float arg_resonance_mass); + ROOT::VecOps::RVec operator()(ROOT::VecOps::RVec legs); + }; + + struct recoilBuilder { + recoilBuilder(float arg_sqrts); + float m_sqrts = 240.0; + double operator() (ROOT::VecOps::RVec in); + }; + ///@} } // namespace JetClusteringUtils diff --git a/analyzers/dataframe/FCCAnalyses/JetConstituentsUtils.h b/analyzers/dataframe/FCCAnalyses/JetConstituentsUtils.h index 22909c8dfe..8db72cb9f4 100644 --- a/analyzers/dataframe/FCCAnalyses/JetConstituentsUtils.h +++ b/analyzers/dataframe/FCCAnalyses/JetConstituentsUtils.h @@ -40,12 +40,13 @@ namespace FCCAnalyses { const ROOT::VecOps::RVec& tracks); rv::RVec get_pt(const rv::RVec&); + rv::RVec get_p(const rv::RVec&); rv::RVec get_e(const rv::RVec&); rv::RVec get_theta(const rv::RVec&); rv::RVec get_phi(const rv::RVec&); rv::RVec get_type(const rv::RVec&); rv::RVec get_charge(const rv::RVec&); - + //displacement rv::RVec get_d0(const rv::RVec&, const ROOT::VecOps::RVec&); @@ -62,39 +63,37 @@ namespace FCCAnalyses { rv::RVec get_tanLambda(const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); - + rv::RVec XPtoPar_dxy(const rv::RVec&, const ROOT::VecOps::RVec&, - const TVector3&, + const TLorentzVector& V, // primary vertex const float&); rv::RVec XPtoPar_dz(const rv::RVec&, const ROOT::VecOps::RVec&, - const TVector3&, + const TLorentzVector& V, // primary vertex const float&); rv::RVec XPtoPar_phi(const rv::RVec&, const ROOT::VecOps::RVec&, - const TVector3&, + const TLorentzVector& V, // primary vertex const float&); rv::RVec XPtoPar_C(const rv::RVec&, const ROOT::VecOps::RVec&, - const TVector3&, const float&); rv::RVec XPtoPar_ct(const rv::RVec&, const ROOT::VecOps::RVec&, - const TVector3&, const float&); //covariance matrix //diagonal rv::RVec get_omega_cov(const rv::RVec&, const ROOT::VecOps::RVec&); - + rv::RVec get_d0_cov(const rv::RVec&, const ROOT::VecOps::RVec& ); - + rv::RVec get_z0_cov(const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); - + rv::RVec get_phi0_cov(const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); @@ -106,7 +105,7 @@ namespace FCCAnalyses { rv::RVec get_phi0_d0_cov(const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); - + rv::RVec get_phi0_z0_cov(const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); @@ -115,7 +114,7 @@ namespace FCCAnalyses { rv::RVec get_tanlambda_d0_cov(const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); - + rv::RVec get_tanlambda_z0_cov(const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); @@ -127,11 +126,11 @@ namespace FCCAnalyses { rv::RVec get_omega_d0_cov(const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); - + rv::RVec get_omega_z0_cov(const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); - + rv::RVec get_dndx(const rv::RVec& jcs, const rv::RVec& dNdx, const rv::RVec& trackdata, @@ -149,7 +148,6 @@ namespace FCCAnalyses { rv::RVec get_Sip2dVal_clusterV(const rv::RVec& 
jets, const rv::RVec& D0, const rv::RVec& phi0, - const TVector3& V, const float Bz); @@ -160,7 +158,7 @@ namespace FCCAnalyses { const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); - + rv::RVec get_Sip3dVal_cluster(const rv::RVec& jets, const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); @@ -169,17 +167,16 @@ namespace FCCAnalyses { const rv::RVec& D0, const rv::RVec& Z0, const rv::RVec& phi0, - const TVector3& V, const float Bz); rv::RVec get_Sip3dSig(const rv::RVec& Sip3dVals, const rv::RVec& err2_D0, const rv::RVec& err2_Z0); - + rv::RVec get_JetDistVal(const rv::RVec& jets, const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); - + rv::RVec get_JetDistVal_cluster(const rv::RVec& jets, const rv::RVec& jcs, const ROOT::VecOps::RVec& tracks); @@ -189,39 +186,40 @@ namespace FCCAnalyses { const rv::RVec& D0, const rv::RVec& Z0, const rv::RVec& phi0, - const TVector3& V, const float Bz); rv::RVec get_JetDistSig(const rv::RVec& JetDistVal, const rv::RVec& err2_D0, const rv::RVec& err2_Z0); - + rv::RVec get_mtof(const rv::RVec& jcs, const rv::RVec& track_L, const rv::RVec& trackdata, const rv::RVec& trackerhits, - const rv::RVec JetsConstituents_isChargedHad); + const rv::RVec& gammadata, + const rv::RVec& nhdata, + const rv::RVec& calohits, + const TLorentzVector& V // primary vertex + ); rv::RVec get_PIDs(const ROOT::VecOps::RVec< int > recin, - const ROOT::VecOps::RVec< int > mcin, + const ROOT::VecOps::RVec< int > mcin, const rv::RVec& RecPart, const rv::RVec& Particle, const rv::RVec& Jets); - + rv::RVec get_PIDs_cluster(const ROOT::VecOps::RVec< int > recin, const ROOT::VecOps::RVec< int > mcin, const rv::RVec& RecPart, const rv::RVec& Particle, const std::vector>& indices); - rv::RVec get_isMu(const rv::RVec&); - rv::RVec get_isEl(const rv::RVec&); - rv::RVec get_isChargedHad(const rv::RVec& PIDs, - const rv::RVec& jcs); - rv::RVec get_isGamma(const rv::RVec&); - rv::RVec get_isNeutralHad(const rv::RVec& PIDs, - const rv::RVec& jcs); + rv::RVec get_isMu(const rv::RVec& jcs); + rv::RVec get_isEl(const rv::RVec& jcs); + rv::RVec get_isChargedHad(const rv::RVec& jcs); + rv::RVec get_isGamma(const rv::RVec& jcs); + rv::RVec get_isNeutralHad(const rv::RVec& jcs); //countings int count_jets(rv::RVec jets); @@ -229,12 +227,12 @@ namespace FCCAnalyses { rv::RVec count_type(const rv::RVec& isType); - + rv::RVec get_erel(const rv::RVec& jets, const rv::RVec& jcs); rv::RVec get_erel_cluster(const rv::RVec& jets, const rv::RVec& jcs); - + rv::RVec get_erel_log(const rv::RVec& jets, const rv::RVec& jcs); rv::RVec get_erel_log_cluster(const rv::RVec& jets, @@ -249,22 +247,23 @@ namespace FCCAnalyses { const rv::RVec& jcs); rv::RVec get_phirel_cluster(const rv::RVec& jets, const rv::RVec& jcs); - + //residues rv::RVec compute_tlv_jets(const rv::RVec& jets); rv::RVec sum_tlv_constituents(const rv::RVec& jets); float InvariantMass(const TLorentzVector& tlv1, const TLorentzVector& tlv2); - rv::RVec compute_residue_energy(const rv::RVec& tlv_jet, + rv::RVec all_invariant_masses(rv::RVec AllJets); // invariant masses of all jet pairs given a vector of jets + rv::RVec compute_residue_energy(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs); - rv::RVec compute_residue_pt(const rv::RVec& tlv_jet, + rv::RVec compute_residue_pt(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs); - rv::RVec compute_residue_phi(const rv::RVec& tlv_jet, + rv::RVec compute_residue_phi(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs); - rv::RVec compute_residue_theta(const rv::RVec& tlv_jet, + rv::RVec 
compute_residue_theta(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs); rv::RVec compute_residue_px(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs); rv::RVec compute_residue_py(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs); - rv::RVec compute_residue_pz(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs); + rv::RVec compute_residue_pz(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs); } // namespace JetConstituentsUtils } // namespace FCCAnalyses diff --git a/analyzers/dataframe/FCCAnalyses/JetFlavourUtils.h b/analyzers/dataframe/FCCAnalyses/JetFlavourUtils.h index 5d1dcd0d1b..c9986f7407 100644 --- a/analyzers/dataframe/FCCAnalyses/JetFlavourUtils.h +++ b/analyzers/dataframe/FCCAnalyses/JetFlavourUtils.h @@ -11,14 +11,14 @@ namespace FCCAnalyses { /// Compute all weights given a collection of input variables /// \note This helper should not be used directly in RDataFrame examples - rv::RVec > compute_weights(const rv::RVec&); + rv::RVec > compute_weights(unsigned int slot, const rv::RVec&); /// Setup the ONNXRuntime instance using Weaver-provided parameters - void setup_weaver(const std::string&, const std::string&, const rv::RVec&); + void setup_weaver(const std::string&, const std::string&, const rv::RVec&, unsigned int nSlots); /// Compute all weights given an unspecified collection of input variables template - ROOT::VecOps::RVec > get_weights(Args&&... args) { - return compute_weights(std::vector{std::forward(args)...}); + ROOT::VecOps::RVec > get_weights(unsigned int slot, Args&&... args) { + return compute_weights(slot, std::vector{std::forward(args)...}); } /// Get one specific weight previously computed rv::RVec get_weight(const rv::RVec >&, int); diff --git a/analyzers/dataframe/FCCAnalyses/JetTaggingUtils.h b/analyzers/dataframe/FCCAnalyses/JetTaggingUtils.h index ba457cd43f..ac707c2739 100644 --- a/analyzers/dataframe/FCCAnalyses/JetTaggingUtils.h +++ b/analyzers/dataframe/FCCAnalyses/JetTaggingUtils.h @@ -1,45 +1,60 @@ -#ifndef JETTAGGINGUTILS_ANALYZERS_H -#define JETTAGGINGUTILS_ANALYZERS_H - -#include -#include "Math/Vector4D.h" -#include "ROOT/RVec.hxx" -#include "edm4hep/MCParticleData.h" -#include "fastjet/JetDefinition.hh" -#include "TRandom3.h" - -/** Jet tagging utilities interface. -This represents a set functions and utilities to perfom jet tagging from a list of jets. -*/ -namespace FCCAnalyses{ - -namespace JetTaggingUtils{ - - /** @name JetTaggingUtils - * Jet tagging interface utilities. - */ - - //Get flavour association of jet - ROOT::VecOps::RVec get_flavour(ROOT::VecOps::RVec in, ROOT::VecOps::RVec MCin); - //Get b-tags with an efficiency applied - ROOT::VecOps::RVec get_btag(ROOT::VecOps::RVec in, float efficiency, float mistag_c=0., float mistag_l=0., float mistag_g=0.); - //Get c-tags with an efficiency applied - ROOT::VecOps::RVec get_ctag(ROOT::VecOps::RVec in, float efficiency, float mistag_b=0., float mistag_l=0., float mistag_g=0.); - //Get l-tags with an efficiency applied - ROOT::VecOps::RVec get_ltag(ROOT::VecOps::RVec in, float efficiency, float mistag_b=0., float mistag_c=0., float mistag_g=0.); - //Get g-tags with an efficiency applied - ROOT::VecOps::RVec get_gtag(ROOT::VecOps::RVec in, float efficiency, float mistag_b=0., float mistag_c=0., float mistag_l=0.); - - /// select a list of jets depending on the status of a certain boolean flag (corresponding to its tagging state) - struct sel_tag { - bool m_pass; // if pass is true, select tagged jets. 
Otherwise select anti-tagged ones - sel_tag(bool arg_pass); - ROOT::VecOps::RVec operator() (ROOT::VecOps::RVec tags, ROOT::VecOps::RVec in); - }; - - ///@} -}//end NS JetTaggingUtils - -}//end NS FCCAnalyses - -#endif +#ifndef JETTAGGINGUTILS_ANALYZERS_H +#define JETTAGGINGUTILS_ANALYZERS_H + +#include "Math/Vector4D.h" +#include "ROOT/RVec.hxx" +#include "TRandom3.h" +#include "edm4hep/MCParticleData.h" +#include "fastjet/JetDefinition.hh" +#include + +/** Jet tagging utilities interface. +This represents a set functions and utilities to perfom jet tagging from a list +of jets. +*/ +namespace FCCAnalyses { + +namespace JetTaggingUtils { + +/** @name JetTaggingUtils + * Jet tagging interface utilities. + */ + +// Get flavour association of jet +ROOT::VecOps::RVec +get_flavour(ROOT::VecOps::RVec in, + ROOT::VecOps::RVec MCin); +// Get b-tags with an efficiency applied +ROOT::VecOps::RVec get_btag(ROOT::VecOps::RVec in, float efficiency, + float mistag_c = 0., float mistag_l = 0., + float mistag_g = 0.); +// Get c-tags with an efficiency applied +ROOT::VecOps::RVec get_ctag(ROOT::VecOps::RVec in, float efficiency, + float mistag_b = 0., float mistag_l = 0., + float mistag_g = 0.); +// Get l-tags with an efficiency applied +ROOT::VecOps::RVec get_ltag(ROOT::VecOps::RVec in, float efficiency, + float mistag_b = 0., float mistag_c = 0., + float mistag_g = 0.); +// Get g-tags with an efficiency applied +ROOT::VecOps::RVec get_gtag(ROOT::VecOps::RVec in, float efficiency, + float mistag_b = 0., float mistag_c = 0., + float mistag_l = 0.); + +/// select a list of jets depending on the status of a certain boolean flag +/// (corresponding to its tagging state) +struct sel_tag { + bool m_pass; // if pass is true, select tagged jets. Otherwise select + // anti-tagged ones + sel_tag(bool arg_pass); + ROOT::VecOps::RVec + operator()(ROOT::VecOps::RVec tags, + ROOT::VecOps::RVec in); +}; + +///@} +} // namespace JetTaggingUtils + +} // namespace FCCAnalyses + +#endif diff --git a/analyzers/dataframe/FCCAnalyses/LinkDef.h b/analyzers/dataframe/FCCAnalyses/LinkDef.h index ae6af9a7ee..23b891b186 100644 --- a/analyzers/dataframe/FCCAnalyses/LinkDef.h +++ b/analyzers/dataframe/FCCAnalyses/LinkDef.h @@ -12,6 +12,8 @@ #pragma link C++ class ROOT::VecOps::RVec+; #pragma link C++ class std::vector>+; +#pragma link C++ class std::vector>+; +#pragma link C++ class std::vector>+; #pragma link C++ class ROOT::VecOps::RVec+; #pragma link C++ class ROOT::VecOps::RVec+; #pragma link C++ class ROOT::VecOps::RVec+; @@ -21,10 +23,17 @@ #pragma link C++ class ROOT::VecOps::RVec+; #pragma link C++ class ROOT::VecOps::RVec+; #pragma link C++ class ROOT::VecOps::RVec+; +#pragma link C++ class ROOT::VecOps::RVec>+; #pragma link C++ class ROOT::VecOps::RVec>+; +#pragma link C++ class ROOT::VecOps::RVec>+; #pragma link C++ class ROOT::VecOps::RVec>+; #pragma link C++ class ROOT::VecOps::RVec>+; +#pragma link C++ class ROOT::VecOps::RVec+; +#pragma link C++ class ROOT::VecOps::RVec>+; +#pragma link C++ class ROOT::VecOps::RVec>+; +#pragma link C++ class ROOT::VecOps::RVec>+; + //to load all other functions #pragma link C++ function dummyLoader; diff --git a/analyzers/dataframe/FCCAnalyses/MCParticle.h b/analyzers/dataframe/FCCAnalyses/MCParticle.h index 9518974952..02ec12761f 100644 --- a/analyzers/dataframe/FCCAnalyses/MCParticle.h +++ b/analyzers/dataframe/FCCAnalyses/MCParticle.h @@ -74,6 +74,13 @@ namespace MCParticle{ TVector3 operator() (ROOT::VecOps::RVec in); }; + /// return the event primary vertex position and time (mm) 
+ struct get_EventPrimaryVertexP4 { + get_EventPrimaryVertexP4(); + int m_genstatus = 21; // Pythia8 code of the incoming particles of the hardest subprocess + TLorentzVector operator() (ROOT::VecOps::RVec in); + }; + /// return a list of indices that correspond to a given MC decay. The list contains the index of the mother, followed by the indices of the daughters, in the order specified. If m_inclusiveDecay is true, the list of daughters is the minimum required for the mother's decay (otherwise, the list is the exact daughters required for the mother's decay). In case there are several such decays in the event, keep only the first one. struct get_indices{ diff --git a/analyzers/dataframe/FCCAnalyses/ReconstructedParticle.h b/analyzers/dataframe/FCCAnalyses/ReconstructedParticle.h index 1d330a64d5..8236e4100f 100644 --- a/analyzers/dataframe/FCCAnalyses/ReconstructedParticle.h +++ b/analyzers/dataframe/FCCAnalyses/ReconstructedParticle.h @@ -42,6 +42,13 @@ namespace ReconstructedParticle{ ROOT::VecOps::RVec operator() (ROOT::VecOps::RVec in); }; + /// select ReconstructedParticles with absolute pseudorapidity less than a maximum absolute value + struct sel_eta { + sel_eta(float arg_min_eta); + float m_min_eta = 2.5; //> pseudorapidity threshold + ROOT::VecOps::RVec operator() (ROOT::VecOps::RVec in); + }; + /// select ReconstructedParticles with momentum greater than a minimum value [GeV] struct sel_p { sel_p(float arg_min_p, float arg_max_p = 1e10); @@ -126,6 +133,9 @@ namespace ReconstructedParticle{ /// return the TlorentzVector of the one input ReconstructedParticle TLorentzVector get_tlv(edm4hep::ReconstructedParticleData in); + /// return visible 4-momentum vector + TLorentzVector get_P4vis(ROOT::VecOps::RVec in); + /// concatenate both input vectors and return the resulting vector ROOT::VecOps::RVec merge(ROOT::VecOps::RVec x, ROOT::VecOps::RVec y); diff --git a/analyzers/dataframe/FCCAnalyses/ReconstructedParticle2Track.h b/analyzers/dataframe/FCCAnalyses/ReconstructedParticle2Track.h index 507276f137..48f76453c0 100644 --- a/analyzers/dataframe/FCCAnalyses/ReconstructedParticle2Track.h +++ b/analyzers/dataframe/FCCAnalyses/ReconstructedParticle2Track.h @@ -6,10 +6,15 @@ #include #include "ROOT/RVec.hxx" +#include "edm4hep/Quantity.h" #include "edm4hep/ReconstructedParticleData.h" +#include "edm4hep/TrackData.h" #include "edm4hep/TrackState.h" +#include "edm4hep/TrackerHitData.h" #include #include +#include + #include #include @@ -17,37 +22,42 @@ namespace FCCAnalyses{ namespace ReconstructedParticle2Track{ + /// Return the momentum of a track to a reconstructed particle + ROOT::VecOps::RVec getRP2TRK_mom (ROOT::VecOps::RVec in, + ROOT::VecOps::RVec tracks); + + /// Return the charge of a track to a reconstructed particle + ROOT::VecOps::RVec getRP2TRK_charge(ROOT::VecOps::RVec in, + ROOT::VecOps::RVec tracks); + //compute the magnetic field Bz - ROOT::VecOps::RVec getRP2TRK_Bz(const ROOT::VecOps::RVec& rps, + ROOT::VecOps::RVec getRP2TRK_Bz(const ROOT::VecOps::RVec& rps, const ROOT::VecOps::RVec& tracks); //here computed for all particles passed - float Bz(const ROOT::VecOps::RVec& rps, + float Bz(const ROOT::VecOps::RVec& rps, const ROOT::VecOps::RVec& tracks); //here only computed for the first charged particle encountered - - ROOT::VecOps::RVec XPtoPar_dxy(const ROOT::VecOps::RVec& in, + ROOT::VecOps::RVec XPtoPar_dxy(const ROOT::VecOps::RVec& in, const ROOT::VecOps::RVec& tracks, - const TVector3& x, - const float& Bz); + const TLorentzVector& V, // primary vertex + const 
float& Bz); ROOT::VecOps::RVec XPtoPar_dz(const ROOT::VecOps::RVec& in, const ROOT::VecOps::RVec& tracks, - const TVector3& V, + const TLorentzVector& V, // primary vertex const float& Bz); ROOT::VecOps::RVec XPtoPar_phi(const ROOT::VecOps::RVec& in, const ROOT::VecOps::RVec& tracks, - const TVector3& V, + const TLorentzVector& V, // primary vertex const float& Bz); ROOT::VecOps::RVec XPtoPar_C(const ROOT::VecOps::RVec& in, const ROOT::VecOps::RVec& tracks, - const TVector3& V, const float& Bz); ROOT::VecOps::RVec XPtoPar_ct(const ROOT::VecOps::RVec& in, const ROOT::VecOps::RVec& tracks, - const TVector3& V, const float& Bz); /// Return the D0 of a track to a reconstructed particle @@ -144,9 +154,16 @@ namespace ReconstructedParticle2Track{ ROOT::VecOps::RVec getRP2TRK( ROOT::VecOps::RVec in, ROOT::VecOps::RVec tracks ) ; + /// Return the reco indices of particles that have tracks + ROOT::VecOps::RVec get_recoindTRK( ROOT::VecOps::RVec in, + ROOT::VecOps::RVec tracks ) ; + /// Return the size of a collection of TrackStates int getTK_n(ROOT::VecOps::RVec x) ; + /// Return if a Reco particle have an associated track + ROOT::VecOps::RVec hasTRK( ROOT::VecOps::RVec in ) ; + }//end NS ReconstructedParticle2Track }//end NS FCCAnalyses diff --git a/analyzers/dataframe/FCCAnalyses/SmearObjects.h b/analyzers/dataframe/FCCAnalyses/SmearObjects.h new file mode 100644 index 0000000000..eb2d2e498c --- /dev/null +++ b/analyzers/dataframe/FCCAnalyses/SmearObjects.h @@ -0,0 +1,101 @@ +#ifndef SMEARING_ANALYZERS_H +#define SMEARING_ANALYZERS_H + +#include +#include + +#include "FCCAnalyses/ReconstructedParticle2Track.h" +#include "ROOT/RVec.hxx" +#include "TLorentzVector.h" +#include "TMatrixDSym.h" +#include "TRandom.h" +#include "edm4hep/MCParticleData.h" +#include + +namespace FCCAnalyses { + +namespace SmearObjects { + +/// for a given MC particle, returns a "track state", i.e. a vector of 5 helix +/// parameters, in Delphes convention +TVectorD TrackParamFromMC_DelphesConv(edm4hep::MCParticleData aMCParticle); + +/// generates new track states, by rescaling the covariance matrix of the tracks +struct SmearedTracks { + bool m_debug; + TRandom m_random; + float m_smear_parameters[5]; + SmearedTracks(float smear_d0, float smear_phi, float smear_omega, + float smear_z0, float smear_tlambda, bool debug); + ROOT::VecOps::RVec + operator()(const ROOT::VecOps::RVec + &allRecoParticles, + const ROOT::VecOps::RVec &alltracks, + const ROOT::VecOps::RVec &RP2MC_indices, + const ROOT::VecOps::RVec &mcParticles); +}; + +/// used to validate the method above. Stores the "MC-truth track states", in a +/// collection that runs parallel to the full collection of tracks. +ROOT::VecOps::RVec mcTrackParameters( + const ROOT::VecOps::RVec + &allRecoParticles, + const ROOT::VecOps::RVec &alltracks, + const ROOT::VecOps::RVec &RP2MC_indices, + const ROOT::VecOps::RVec &mcParticles); + +/// generates random values for a vector, given the covariance matrix of its +/// components, using a Choleski decomposition. 
Code from Franco Bedeschi +TVectorD CovSmear(TVectorD x, TMatrixDSym C, TRandom *ran, bool debug); + +/// generates new track dNdx, by rescaling the poisson error of the cluster +/// count +struct SmearedTracksdNdx { + bool m_debug; + TRandom m_random; + float m_scale; + SmearedTracksdNdx(float m_scale, bool debug); + ROOT::VecOps::RVec + operator()(const ROOT::VecOps::RVec + &allRecoParticles, + const ROOT::VecOps::RVec &dNdx, + const ROOT::VecOps::RVec &length, + const ROOT::VecOps::RVec &RP2MC_indices, + const ROOT::VecOps::RVec &mcParticles); +}; + +/// generates new tracker hits, by rescaling the timing measurement +struct SmearedTracksTOF { + bool m_debug; + TRandom m_random; + float m_scale; + SmearedTracksTOF(float m_scale, bool debug); + ROOT::VecOps::RVec + operator()(const ROOT::VecOps::RVec + &allRecoParticles, + const ROOT::VecOps::RVec &trackdata, + const ROOT::VecOps::RVec &trackerhits, + const ROOT::VecOps::RVec &length, + const ROOT::VecOps::RVec &RP2MC_indices, + const ROOT::VecOps::RVec &mcParticles); +}; + +/// generates new reco particles, smeared by given parameters +struct SmearedReconstructedParticle { + bool m_debug; + float m_scale; + int m_type; + int m_mode; + SmearedReconstructedParticle(float scale, int type, int mode, bool debug); + + ROOT::VecOps::RVec + operator()(const ROOT::VecOps::RVec + &allRecoParticles, + const ROOT::VecOps::RVec &RP2MC_indices, + const ROOT::VecOps::RVec &mcParticles); +}; + +} // namespace SmearObjects +} // namespace FCCAnalyses + +#endif diff --git a/analyzers/dataframe/FCCAnalyses/VertexFinderLCFIPlus.h b/analyzers/dataframe/FCCAnalyses/VertexFinderLCFIPlus.h new file mode 100644 index 0000000000..396102fe25 --- /dev/null +++ b/analyzers/dataframe/FCCAnalyses/VertexFinderLCFIPlus.h @@ -0,0 +1,196 @@ +#ifndef VERTEXFINDERLCFIPLUS_ANALYZERS_H +#define VERTEXFINDERLCFIPLUS_ANALYZERS_H + +#include +#include + +#include "ROOT/RVec.hxx" +#include "edm4hep/ReconstructedParticleData.h" +#include "edm4hep/TrackState.h" + +#include "FCCAnalyses/ReconstructedParticle2Track.h" +#include "FCCAnalyses/VertexFitterSimple.h" +#include "FCCAnalyses/VertexingUtils.h" + +#include "fastjet/JetDefinition.hh" + +/** Primary and Seconday Vertex Finder interface using vertex fitter from VertexFitterSimple. +This represents a set functions and utilities to find vertices from a list of tracks following the algorithm from LCFIPlus framework. +*/ + +namespace FCCAnalyses{ + +namespace VertexFinderLCFIPlus{ + + /** returns SVs reconstructed from non-primary tracks of jets + * non-primary separated from all tracks using isInPrimary (bool) vector + * currently not separating SVs by jet + */ + ROOT::VecOps::RVec> get_SV_jets( ROOT::VecOps::RVec recoparticles, + ROOT::VecOps::RVec thetracks, + VertexingUtils::FCCAnalysesVertex PV, + ROOT::VecOps::RVec isInPrimary, + ROOT::VecOps::RVec jets, + std::vector> jet_consti, + bool V0_rej=true, + double chi2_cut=9., double invM_cut=10., double chi2Tr_cut=5. ) ; + + /** returns SVs reconstructed from non-primary tracks of the event + * SV finding done before jet clustering + * non-primary separated from all tracks using isInPrimary (bool) vector + */ + ROOT::VecOps::RVec get_SV_event( ROOT::VecOps::RVec recoparticles, + ROOT::VecOps::RVec thetracks, + VertexingUtils::FCCAnalysesVertex PV, + ROOT::VecOps::RVec isInPrimary, + bool V0_rej=true, + double chi2_cut=9., double invM_cut=10., double chi2Tr_cut=5. 
) ; + + /** returns SVs reconstructed from non-primary tracks of the event + * SV finding done before jet clustering + */ + ROOT::VecOps::RVec get_SV_event( ROOT::VecOps::RVec np_tracks, + ROOT::VecOps::RVec thetracks, + VertexingUtils::FCCAnalysesVertex PV, + bool V0_rej=true, + double chi2_cut=9., double invM_cut=10., double chi2Tr_cut=5. ) ; + + /** returns indices of the best pair of tracks from a vector of (non-primary) tracks + * default chi2 threshold is 9 and default invariant mass threshold is 10GeV + */ + ROOT::VecOps::RVec VertexSeed_best( ROOT::VecOps::RVec tracks, + VertexingUtils::FCCAnalysesVertex PV, + double chi2_cut=9., double invM_cut=10.) ; + + /** adds index of the best track (from the remaining tracks) to the (seed) vtx + * default chi2 threshold is 9 and default invariant mass threshold is 10GeV + * default threshold for track's chi2 contribution is 5 (?) + */ + ROOT::VecOps::RVec addTrack_best( ROOT::VecOps::RVec tracks, + ROOT::VecOps::RVec vtx_tr, + VertexingUtils::FCCAnalysesVertex PV, + double chi2_cut=9., double invM_cut=10., double chi2Tr_cut=5.) ; + + /** V0 rejection (tight) + * takes all (non-primary tracks) & removes tracks coming from V0s if user chooses + * by default V0 rejection is done + */ + ROOT::VecOps::RVec V0rejection_tight( ROOT::VecOps::RVec tracks, + VertexingUtils::FCCAnalysesVertex PV, + bool V0_rej=true ) ; + + /** find SVs from a set of tracks + * default values of thresholds for the constraints are set + */ + ROOT::VecOps::RVec findSVfromTracks( ROOT::VecOps::RVec tracks_fin, + const ROOT::VecOps::RVec& alltracks, + VertexingUtils::FCCAnalysesVertex PV, + double chi2_cut=9., double invM_cut=10., double chi2Tr_cut=5.) ; + + /** check constraints of vertex candidates + * default values of thresholds for the constraints are set + * default constraint check is that for finding vertex seed + * seed=true -> constraints for seed; seed=false -> constraints for adding tracks + */ + bool check_constraints( VertexingUtils::FCCAnalysesVertex vtx, + ROOT::VecOps::RVec tracks, + VertexingUtils::FCCAnalysesVertex PV, + bool seed=true, + double chi2_cut=9., double invM_cut=10., double chi2Tr_cut=5.) ; + + /** V0 rejection/identification + * takes all (non-primary) tracks & assigns "true" to pairs that form a V0 + * if(tight) -> tight constraints + * if(!tight) -> loose constraints + * by default loose constraints + */ + ROOT::VecOps::RVec isV0( ROOT::VecOps::RVec np_tracks, + VertexingUtils::FCCAnalysesVertex PV, + bool tight = false ) ; + + /// + + /** returns V0s reconstructed from a set of tracks (as an FCCAnalysesV0 object) + * constraint thresholds can be chosen out of two sets + */ + VertexingUtils::FCCAnalysesV0 get_V0s( ROOT::VecOps::RVec np_tracks, + VertexingUtils::FCCAnalysesVertex PV, + bool tight, + double chi2_cut=9. ) ; + + /** returns V0s reconstructed from a set of tracks (as an FCCAnalysesV0 object) + * constraint thresholds can be set manually + */ + VertexingUtils::FCCAnalysesV0 get_V0s( ROOT::VecOps::RVec np_tracks, + VertexingUtils::FCCAnalysesVertex PV, + double Ks_invM_low=0.493, double Ks_invM_high=0.503, double Ks_dis=0.5, double Ks_cosAng=0.999, + double Lambda_invM_low=1.111, double Lambda_invM_high=1.121, double Lambda_dis=0.5, double Lambda_cosAng=0.99995, + double Gamma_invM_low=0., double Gamma_invM_high=0.005, double Gamma_dis=9, double Gamma_cosAng=0.99995, + double chi2_cut=9. 
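// Illustrative sketch, not part of this header: how a V0 candidate would be tested
// against one of the threshold sets quoted in get_V0s above (invariant-mass window,
// minimum displacement from the PV, minimum collinearity). Struct and function names
// are made up for the example; the real selection is done inside isV0 / get_V0s.
#include "TVector3.h"

struct V0Cuts {
  double invM_low, invM_high; // invariant-mass window [GeV]
  double dis;                 // minimum displacement from the PV [mm]
  double cosAng;              // minimum cos(angle) between displacement and momentum
};

bool passesV0Cuts(double invM, const TVector3 &r_PV2vtx, const TVector3 &p_vtx,
                  const V0Cuts &cuts) {
  if (invM < cuts.invM_low || invM > cuts.invM_high) return false;
  if (r_PV2vtx.Mag() < cuts.dis) return false;
  return r_PV2vtx.Unit().Dot(p_vtx.Unit()) > cuts.cosAng;
}

// e.g. the Ks defaults quoted above: V0Cuts ks{0.493, 0.503, 0.5, 0.999};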
) ; + + /** returns V0s reconstructed in each jet of the event (as an FCCAnalysesV0 object) + * need to perform jet clustering before calling this function + */ + VertexingUtils::FCCAnalysesV0 get_V0s_jet( ROOT::VecOps::RVec recoparticles, + ROOT::VecOps::RVec thetracks, + ROOT::VecOps::RVec isInPrimary, + ROOT::VecOps::RVec jets, + std::vector> jet_consti, + VertexingUtils::FCCAnalysesVertex PV, + bool tight = true, + double chi2_cut=9. ); + + /** returns invariant mass, distance from PV, and colliniarity variables for all V0 candidates + * [0] -> invM_Ks [GeV] + * [1] -> invM_Lambda1 [GeV] + * [2] -> invM_Lambda2 [GeV] + * [3] -> invM_Gamma [GeV] + * [4] -> r [mm] + * [5] -> r.p [unit vector] + * boolean check for if chi2 constraint needs to be checked + * skip the candidate with output size 0 - doesn't pass the chi2 cut + */ + ROOT::VecOps::RVec get_V0candidate( VertexingUtils::FCCAnalysesVertex &V0_vtx, + ROOT::VecOps::RVec tr_pair, + VertexingUtils::FCCAnalysesVertex PV, + bool chi2, + double chi2_cut=9. ); + + /** functions to fill constraint thresholds + * tight -> tight constraints + * !tight -> loose constraints + * also an option to choose constraint threshold + * + * [0] -> invariant mass lower limit [GeV] + * [1] -> invariant mass upper limit [GeV] + * [2] -> distance from PV [mm] + * [3] -> colinearity + */ + ROOT::VecOps::RVec constraints_Ks(bool tight) ; + ROOT::VecOps::RVec constraints_Lambda0(bool tight) ; + ROOT::VecOps::RVec constraints_Gamma(bool tight) ; + // + ROOT::VecOps::RVec constraints_Ks(double invM_low, double invM_high, double dis, double cosAng) ; + ROOT::VecOps::RVec constraints_Lambda0(double invM_low, double invM_high, double dis, double cosAng) ; + ROOT::VecOps::RVec constraints_Gamma(double invM_low, double invM_high, double dis, double cosAng) ; + /** returns indices of the all pairs of tracks that pass a set of constraints from a vector of (non-primary) tracks + * default chi2 threshold is 9 and default invariant mass threshold is 10GeV + */ + //ROOT::VecOps::RVec> VertexSeed_all( ROOT::VecOps::RVec tracks, + // VertexingUtils::FCCAnalysesVertex PV, + // double chi2_cut=9., double invM_cut=10.) ; + + /** adds indices of tracks (from the remaining tracks) that pass a set of constraints to the (seed) vtx + * default chi2 threshold is 9 and default invariant mass threshold is 10GeV + * default threshold for track's chi2 contribution is 5 (?) + */ + //ROOT::VecOps::RVec addTrack_multi( ROOT::VecOps::RVec tracks, + // ROOT::VecOps::RVec vtx_tr, + // VertexingUtils::FCCAnalysesVertex PV, + // double chi2_cut=9., double invM_cut=10., double chi2Tr_cut=5.) ; + + +}//end NS VertexFinderLCFIPlus + +}//end NS FCCAnalyses +#endif diff --git a/analyzers/dataframe/FCCAnalyses/VertexFitterSimple.h b/analyzers/dataframe/FCCAnalyses/VertexFitterSimple.h index 20e595cee5..fa408a9ad3 100644 --- a/analyzers/dataframe/FCCAnalyses/VertexFitterSimple.h +++ b/analyzers/dataframe/FCCAnalyses/VertexFitterSimple.h @@ -22,6 +22,9 @@ #include "edm4hep/VertexData.h" #include "edm4hep/Vertex.h" +#include "VertexFit.h" // from Delphes - updates Franco, Jul 2022 +#include "VertexMore.h" + /** Vertex interface using Franco Bedeshi's code. This represents a set functions and utilities to perfom vertexing from a list of tracks. @@ -45,13 +48,17 @@ namespace VertexFitterSimple{ double sigmax=0., double sigmay=0., double sigmaz=0., double bsc_x=0., double bsc_y=0., double bsc_z=0. 
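// Illustrative sketch, not part of this header: unpacking the get_V0candidate output
// documented above. The element type double is assumed here (template arguments are
// not visible in this rendering of the diff); names are made up for the example.
#include "ROOT/RVec.hxx"

struct V0CandidateSummary {
  double invM_Ks, invM_Lambda1, invM_Lambda2, invM_Gamma; // [GeV]
  double r;      // distance from the PV [mm]
  double cosAng; // r.p collinearity of unit vectors
};

// Candidates failing the chi2 cut come back with size 0 and must be skipped.
bool unpackV0Candidate(const ROOT::VecOps::RVec<double> &v0, V0CandidateSummary &out) {
  if (v0.size() < 6) return false;
  out = {v0[0], v0[1], v0[2], v0[3], v0[4], v0[5]};
  return true;
}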
) ; + VertexingUtils::FCCAnalysesVertex VertexFitter_Tk( int Primary, ROOT::VecOps::RVec tracks, + const ROOT::VecOps::RVec& alltracks, + bool BeamSpotConstraint = false, + double sigmax=0., double sigmay=0., double sigmaz=0., + double bsc_x=0., double bsc_y=0., double bsc_z=0. ) ; + /// Return the tracks that are flagged as coming from the primary vertex - ROOT::VecOps::RVec get_PrimaryTracks( VertexingUtils::FCCAnalysesVertex initialVertex, - ROOT::VecOps::RVec tracks, + ROOT::VecOps::RVec get_PrimaryTracks( ROOT::VecOps::RVec tracks, bool BeamSpotConstraint, double bsc_sigmax, double bsc_sigmay, double bsc_sigmaz, - double bsc_x, double bsc_y, double bsc_z, - int ipass = 0 ) ; + double bsc_x, double bsc_y, double bsc_z ) ; /// Return the tracks that are NOT flagged as coming from the primary vertex @@ -63,7 +70,7 @@ namespace VertexFitterSimple{ ROOT::VecOps::RVec primaryTracks ) ; - +/* Double_t FastRv(TVectorD p1, TVectorD p2) ; TMatrixDSym RegInv3(TMatrixDSym &Smat0) ; TMatrixD Fill_A(TVectorD par, Double_t phi) ; @@ -74,6 +81,12 @@ namespace VertexFitterSimple{ TVectorD XPtoPar(TVector3 x, TVector3 p, Double_t Q); TVector3 ParToP(TVectorD Par); + TVectorD XPtoPar(TVector3 x, TVector3 p, Double_t Q); + TVector3 ParToP(TVectorD Par); +*/ + + + }//end NS VertexFitterSimple }//end NS FCCAnalyses diff --git a/analyzers/dataframe/FCCAnalyses/VertexingUtils.h b/analyzers/dataframe/FCCAnalyses/VertexingUtils.h index 851be8222e..53ca02886e 100644 --- a/analyzers/dataframe/FCCAnalyses/VertexingUtils.h +++ b/analyzers/dataframe/FCCAnalyses/VertexingUtils.h @@ -12,10 +12,13 @@ #include "edm4hep/VertexData.h" #include "edm4hep/Vertex.h" +#include "TLorentzVector.h" #include "TVectorD.h" #include "TVector3.h" #include "TMatrixDSym.h" +#include "fastjet/JetDefinition.hh" + /** Vertexing utilities */ @@ -23,18 +26,33 @@ namespace FCCAnalyses{ namespace VertexingUtils{ + /// from delphes: returns track state parameters (delphes convention) for a given vertex (x), momentum (p) and charge + TVectorD XPtoPar(TVector3 x, TVector3 p, Double_t Q); + + /// from delphes: returns the momentum corresponding to a given track state + TVector3 ParToP(TVectorD Par); + + /// Structure to keep useful track information that is related to the vertex struct FCCAnalysesVertex{ edm4hep::VertexData vertex; int ntracks; int mc_ind; ///index in the MC vertex collection if any - ROOT::VecOps::RVec reco_ind; + ROOT::VecOps::RVec reco_ind; // indices of the tracks fitted to that vertex, in the collection of all tracks ROOT::VecOps::RVec reco_chi2; ROOT::VecOps::RVec< TVector3 > updated_track_momentum_at_vertex; ROOT::VecOps::RVec< TVectorD > updated_track_parameters; ROOT::VecOps::RVec final_track_phases; }; + /// Structure to keep useful information that is related to the V0 + struct FCCAnalysesV0{ + ROOT::VecOps::RVec vtx; // vertex object + ROOT::VecOps::RVec pdgAbs; // pdg ID from reconstructions + ROOT::VecOps::RVec invM; // invariant mass + ROOT::VecOps::RVec nSV_jet; // no of V0s per jet + }; + /// Structure to keep useful track information that is related to the vertex struct FCCAnalysesVertexMC{ TVector3 vertex; @@ -80,20 +98,225 @@ namespace VertexingUtils{ /// Retrieve the number of tracks from FCCAnalysesVertex int get_VertexNtrk( FCCAnalysesVertex TheVertex ) ; + ROOT::VecOps::RVec get_VertexNtrk( ROOT::VecOps::RVec vertices ) ; + /// Retrieve the tracks indices from FCCAnalysesVertex ROOT::VecOps::RVec get_VertexRecoInd( FCCAnalysesVertex TheVertex ) ; + /// Retrieve the indices of the tracks fitted to that 
vertex, but now in the collection of RecoParticles + ROOT::VecOps::RVec get_VertexRecoParticlesInd( FCCAnalysesVertex TheVertex, + const ROOT::VecOps::RVec& reco ); + /// Return the number of tracks in a given track collection int get_nTracks(ROOT::VecOps::RVec tracks); + /// compare two track states + bool compare_Tracks( const edm4hep::TrackState& tr1, const edm4hep::TrackState& tr2 ) ; + + /////////////////////////////////////////////////// + /// functions used for SV reconstruction + + /** returns a vector of all vertices (PV and SVs), e.g to use in myUtils::get_Vertex_d2PV + * first entry: PV, all subsequent entries: SVs + */ + ROOT::VecOps::RVec get_all_vertices( FCCAnalysesVertex PV, + ROOT::VecOps::RVec SV ); + + ROOT::VecOps::RVec get_all_vertices( FCCAnalysesVertex PV, + ROOT::VecOps::RVec> SV ); + + /** returns the invariant mass of a two-track vertex + * CAUTION: m1 -> mass of first track, m2 -> mass of second track + * by default both pions + */ + double get_invM_pairs( FCCAnalysesVertex vertex, + double m1 = 0.13957039, + double m2 = 0.13957039) ; + + ROOT::VecOps::RVec get_invM_pairs( ROOT::VecOps::RVec vertices, + double m1 = 0.13957039, + double m2 = 0.13957039 ) ; + + /** returns the invariant mass of a vertex + * assuming all tracks to be pions + */ + double get_invM( FCCAnalysesVertex vertex ) ; + + /** returns the invariant mass of a vector of vertices + * assuming all tracks to be pions + */ + ROOT::VecOps::RVec get_invM( ROOT::VecOps::RVec vertices ) ; + + /** returns the cos of the angle b/n V0 candidate's (or any vtx's) momentum & PV to V0 (vtx) displacement vector */ + double get_PV2V0angle( FCCAnalysesVertex V0, + FCCAnalysesVertex PV) ; + + /** returns cos of the angle b/n track (that form the vtx) momentum sum & PV to vtx displacement vector */ + double get_PV2vtx_angle( ROOT::VecOps::RVec tracks, + FCCAnalysesVertex vtx, + FCCAnalysesVertex PV ) ; + + /** returns a track's energy + * assuming the track to be a pion + */ + double get_trackE( edm4hep::TrackState track ) ; + + /////////////////////////////////////////////////// + /// V0 Reconstruction + /// Return the number of reconstructed V0s + int get_n_SV( FCCAnalysesV0 SV ); + + /// Return the vertex position of all reconstructed V0s (in mm) + ROOT::VecOps::RVec get_position_SV( FCCAnalysesV0 SV ); + + /// Return the PDG IDs of all reconstructed V0s + ROOT::VecOps::RVec get_pdg_V0( FCCAnalysesV0 V0 ); + + /// Return the invariant masses of all reconstructed V0s + ROOT::VecOps::RVec get_invM_V0( FCCAnalysesV0 V0 ); + + /// Return the momentum of all reconstructed V0s + ROOT::VecOps::RVec get_p_SV( FCCAnalysesV0 SV ); + + /// Return chi2 of all reconstructed V0s + ROOT::VecOps::RVec get_chi2_SV( FCCAnalysesV0 SV ); + + /////////////////////////////////////////////////// + + /// Passing a vector of FCCAnalysesVertex instead of FCCAnalysesV0 + /// Return the number of reconstructed SVs + int get_n_SV( ROOT::VecOps::RVec vertices ); + + /// Return the momentum of all reconstructed vertices (or V0.vtx) + ROOT::VecOps::RVec get_p_SV( ROOT::VecOps::RVec vertices ); + + /// Return the vertex position of all reconstructed SVs (in mm) + ROOT::VecOps::RVec get_position_SV( ROOT::VecOps::RVec vertices ); + + /// Return the momentum magnitude of all reconstructed vertices (or V0.vtx) + ROOT::VecOps::RVec get_pMag_SV( ROOT::VecOps::RVec vertices ); + + /// Return chi2 of all reconstructed vertices (or V0.vtx) + ROOT::VecOps::RVec get_chi2_SV( ROOT::VecOps::RVec vertices ); - // --- Internal methods needed by the code of 
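// Illustrative sketch, not part of this header: the two quantities documented above,
// written out for a pair of track momenta. get_invM_pairs assumes a mass per track
// (charged-pion mass 0.13957039 GeV by default) and get_PV2vtx_angle returns the
// cosine between the summed track momentum and the PV -> vertex displacement.
#include "TLorentzVector.h"
#include "TVector3.h"

double pairInvariantMass(const TVector3 &p1, const TVector3 &p2,
                         double m1 = 0.13957039, double m2 = 0.13957039) {
  TLorentzVector v1, v2;
  v1.SetVectM(p1, m1); // E = sqrt(|p|^2 + m^2)
  v2.SetVectM(p2, m2);
  return (v1 + v2).M();
}

double pointingCosine(const TVector3 &pSum, const TVector3 &pv, const TVector3 &vtx) {
  return (vtx - pv).Unit().Dot(pSum.Unit());
}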
Franco B : - TVectorD get_trackParam( edm4hep::TrackState & atrack) ; - TMatrixDSym get_trackCov( edm4hep::TrackState & atrack) ; + /// Return normalised chi2 of all reconstructed vertices (or V0.vtx) + ROOT::VecOps::RVec get_norm_chi2_SV( ROOT::VecOps::RVec vertices ); + + /// Return no of DOF of all reconstructed vertices (or V0.vtx) + ROOT::VecOps::RVec get_nDOF_SV( ROOT::VecOps::RVec vertices ); + + /// Return polar angle (theta) of all reconstructed vertices (or V0.vtx) + ROOT::VecOps::RVec get_theta_SV( ROOT::VecOps::RVec vertices ); + + /// Return azimuthal angle (phi) of all reconstructed vertices (or V0.vtx) + ROOT::VecOps::RVec get_phi_SV( ROOT::VecOps::RVec vertices ); + + /// Return polar angle (theta) of all reconstructed vertices wrt jets (or V0.vtx) + ROOT::VecOps::RVec get_relTheta_SV( ROOT::VecOps::RVec vertices, + ROOT::VecOps::RVec nSV_jet, + ROOT::VecOps::RVec jets ); + + /// Return azimuthal angle (phi) of all reconstructed vertices wrt jets (or V0.vtx) + ROOT::VecOps::RVec get_relPhi_SV( ROOT::VecOps::RVec vertices, + ROOT::VecOps::RVec nSV_jet, + ROOT::VecOps::RVec jets ); + + /// Return the pointing angle of all reconstructed vertices (or V0.vtx) + ROOT::VecOps::RVec get_pointingangle_SV( ROOT::VecOps::RVec vertices, + FCCAnalysesVertex PV ); + + /// Return the distances of all reconstructed vertices from PV in xy plane [mm] (or V0.vtx) + ROOT::VecOps::RVec get_dxy_SV( ROOT::VecOps::RVec vertices, + FCCAnalysesVertex PV ); + + /// Return the distances of all reconstructed vertices from PV in 3D [mm] (or V0.vtx) + ROOT::VecOps::RVec get_d3d_SV( ROOT::VecOps::RVec vertices, + FCCAnalysesVertex PV ); + + /// Return the distances of all reconstructed verteces from given TVector3d object in 3D [mm] (or V0.vtx) + ROOT::VecOps::RVec get_d3d_SV_obj( ROOT::VecOps::RVec vertices, + TVector3 location ); + + /// Return the distances of all reconstructed verteces from given edm4hep::Vector3d object in 3D [mm] (or V0.vtx) + ROOT::VecOps::RVec get_d3d_SV_obj( ROOT::VecOps::RVec vertices, + edm4hep::Vector3d location ); + + /// Return the distance in R of all reconstructed verteces from given TVector3d object in 3D [mm] (or V0.vtx) + ROOT::VecOps::RVec get_dR_SV_obj( ROOT::VecOps::RVec vertices, + TVector3 location ); + + /// Return the distances in R of all reconstructed verteces from given edm4hep::Vector3d object in 3D [mm] (or V0.vtx) + ROOT::VecOps::RVec get_dR_SV_obj( ROOT::VecOps::RVec vertices, + edm4hep::Vector3d location ); + + /////////////////////////////////////////////////// + + /// For get_SV_jets /// + + /// Return the number of reconstructed SVs + ROOT::VecOps::RVec get_all_SVs( ROOT::VecOps::RVec> vertices ); + + /// Return the total number of reconstructed SVs + int get_n_SV( ROOT::VecOps::RVec> vertices ); + + /// Return the number of reconstructed SVs per jet + ROOT::VecOps::RVec get_n_SV_jets( ROOT::VecOps::RVec> vertices ); + + /// Return the tracks separated by jets + std::vector> get_tracksInJets( ROOT::VecOps::RVec recoparticles, + ROOT::VecOps::RVec thetracks, + ROOT::VecOps::RVec jets, + std::vector> jet_consti ); + + /// Return V0s separated by jets + ROOT::VecOps::RVec> get_svInJets( ROOT::VecOps::RVec vertices, + ROOT::VecOps::RVec nSV_jet ); + + // --- for get_SV_jets --- // + ROOT::VecOps::RVec> get_invM( ROOT::VecOps::RVec> vertices ); + ROOT::VecOps::RVec> get_p_SV( ROOT::VecOps::RVec> vertices ); + ROOT::VecOps::RVec> get_pMag_SV( ROOT::VecOps::RVec> vertices ); + ROOT::VecOps::RVec> get_VertexNtrk( ROOT::VecOps::RVec> vertices ); + 
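// Illustrative sketch, not part of this header: the per-jet getters above (e.g.
// get_svInJets) work on flat, jet-ordered collections accompanied by nSV_jet, the
// number of entries belonging to each jet. Splitting such a flat vector per jet
// amounts to the following; the element type double is an assumption for the example.
#include <cstddef>
#include <vector>
#include "ROOT/RVec.hxx"

std::vector<ROOT::VecOps::RVec<double>>
splitPerJet(const ROOT::VecOps::RVec<double> &flat,
            const ROOT::VecOps::RVec<int> &nSV_jet) {
  std::vector<ROOT::VecOps::RVec<double>> perJet;
  std::size_t first = 0;
  for (int n : nSV_jet) {
    ROOT::VecOps::RVec<double> block;
    for (int k = 0; k < n && first + k < flat.size(); ++k)
      block.push_back(flat[first + k]);
    perJet.push_back(block);
    first += n;
  }
  return perJet;
}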
ROOT::VecOps::RVec> get_chi2_SV( ROOT::VecOps::RVec> vertices ); + ROOT::VecOps::RVec> get_norm_chi2_SV( ROOT::VecOps::RVec> vertices ); + ROOT::VecOps::RVec> get_nDOF_SV( ROOT::VecOps::RVec> vertices ); + ROOT::VecOps::RVec> get_theta_SV( ROOT::VecOps::RVec> vertices ); + ROOT::VecOps::RVec> get_phi_SV( ROOT::VecOps::RVec> vertices ); + ROOT::VecOps::RVec> get_relTheta_SV( ROOT::VecOps::RVec> vertices, ROOT::VecOps::RVec jets ); + ROOT::VecOps::RVec> get_relPhi_SV( ROOT::VecOps::RVec> vertices, ROOT::VecOps::RVec jets ); + ROOT::VecOps::RVec> get_pointingangle_SV( ROOT::VecOps::RVec> vertices, FCCAnalysesVertex PV ); + ROOT::VecOps::RVec> get_dxy_SV( ROOT::VecOps::RVec> vertices, FCCAnalysesVertex PV ); + ROOT::VecOps::RVec> get_d3d_SV( ROOT::VecOps::RVec> vertices, FCCAnalysesVertex PV ); + ROOT::VecOps::RVec> get_pdg_V0( ROOT::VecOps::RVec pdg, ROOT::VecOps::RVec nSV_jet ); + ROOT::VecOps::RVec> get_invM_V0( ROOT::VecOps::RVec invM, ROOT::VecOps::RVec nSV_jet ); + /// Return the vertex position of all reconstructed SVs (in mm) + ROOT::VecOps::RVec> get_position_SV( ROOT::VecOps::RVec> vertices ); + // --- for get_SV_jets --- // + + float get_trackMom( edm4hep::TrackState & atrack ); + + +// --- Conversion methods between the Delphes and edm4hep conventions + +/// convert track parameters, from edm4hep to delphes conventions + TVectorD Edm4hep2Delphes_TrackParam( const TVectorD& param, bool Units_mm ); +/// convert track parameters, from delphes to edm4hep conventions + TVectorD Delphes2Edm4hep_TrackParam( const TVectorD& param, bool Units_mm ); +/// convert track covariance matrix, from edm4hep to delphes conventions + TMatrixDSym Edm4hep2Delphes_TrackCovMatrix( const std::array& covMatrix, bool Units_mm ); +/// convert track covariance matrix, from delphes to edm4hep conventions + std::array Delphes2Edm4hep_TrackCovMatrix( const TMatrixDSym& cov, bool Units_mm ) ; + + + /// --- Internal methods needed by the code of Franco B: + TVectorD get_trackParam( edm4hep::TrackState & atrack, bool Units_mm = false) ; + TMatrixDSym get_trackCov( edm4hep::TrackState & atrack, bool Units_mm = false) ; TVectorD ParToACTS(TVectorD Par); TMatrixDSym CovToACTS(TMatrixDSym Cov,TVectorD Par); + + }//end NS VertexingUtils }//end NS FCCAnalyses diff --git a/analyzers/dataframe/FCCAnalyses/myUtils.h b/analyzers/dataframe/FCCAnalyses/myUtils.h index a4a1e0aa40..2b8f3a6ded 100644 --- a/analyzers/dataframe/FCCAnalyses/myUtils.h +++ b/analyzers/dataframe/FCCAnalyses/myUtils.h @@ -50,21 +50,6 @@ namespace myUtils{ }; - struct build_composite_vertex { - build_composite_vertex(int arg_n, int arg_charge, float arg_masslow, float arg_masshigh, float arg_p, bool arg_cc, bool arg_filterPV); - int m_n=3; - int m_charge=0; - float m_masslow=0.05; - float m_masshigh=0.05; - float m_p=1.; - bool m_cc=true; - bool m_filterPV=true; - - ROOT::VecOps::RVec operator() (ROOT::VecOps::RVec recop, - ROOT::VecOps::RVec tracks, - ROOT::VecOps::RVec in, - ROOT::VecOps::RVec pvindex); - }; struct build_tau23pi { build_tau23pi( float arg_masslow, float arg_masshigh, float arg_p, float arg_angle, bool arg_rho); @@ -78,21 +63,6 @@ namespace myUtils{ }; - struct build_tau23pi_vertexing { - build_tau23pi_vertexing(int arg_charge, float arg_masslow, float arg_masshigh, float arg_p, float arg_angle, bool arg_cc, bool arg_filterPV, bool arg_rho); - int m_charge=1; - float m_masslow=0.05; - float m_masshigh=3.0; - float m_p=1.; - float m_angle=1.; - bool m_cc=true; - bool m_filterPV=true; - bool m_rho = true; - ROOT::VecOps::RVec operator() 
(ROOT::VecOps::RVec recop, - ROOT::VecOps::RVec tracks, - ROOT::VecOps::RVec in, - ROOT::VecOps::RVec pvindex); - }; struct sel_PV { sel_PV(bool arg_closest); @@ -369,11 +339,6 @@ namespace myUtils{ ROOT::VecOps::RVec vertex); - ROOT::VecOps::RVec awkwardtest(ROOT::VecOps::RVec recop, - ROOT::VecOps::RVec tracks, - ROOT::VecOps::RVec recind, - ROOT::VecOps::RVec mcind, - ROOT::VecOps::RVec mc); float build_invmass(ROOT::VecOps::RVec recop, ROOT::VecOps::RVec index); diff --git a/analyzers/dataframe/case-studies/CMakeLists.txt b/analyzers/dataframe/case-studies/CMakeLists.txt index 3cea4ec475..a3416ecfae 100644 --- a/analyzers/dataframe/case-studies/CMakeLists.txt +++ b/analyzers/dataframe/case-studies/CMakeLists.txt @@ -13,11 +13,9 @@ include_directories(${EDM4HEP_INCLUDE_DIRS} ${podio_INCLUDE_DIR} ${FASTJET_INCLUDE_DIR} ${acts_INCLUDE_DIR} - ${AWKWARD_INCLUDE} ${VDT_INCLUDE_DIR} ) -message(STATUS "includes-------------------------- dataframe awkward: ${AWKWARD_INCLUDE}") message(STATUS "includes-------------------------- dataframe edm4hep: ${EDM4HEP_INCLUDE_DIRS}") message(STATUS "includes-------------------------- dataframe podio : ${podio_INCLUDE_DIR}") message(STATUS "includes-------------------------- dataframe fastjet: ${FASTJET_INCLUDE_DIRS}") @@ -39,7 +37,6 @@ target_include_directories(FCCAnalysesCS PUBLIC ${FCCEDM_INCLUDE_DIRS} ${FASTJET_INCLUDE_DIR} ${acts_INCLUDE_DIR} - ${AWKWARD_INCLUDE} ) target_link_libraries(FCCAnalysesCS @@ -54,10 +51,6 @@ target_link_libraries(FCCAnalysesCS ${FASTJET_LIBRARIES} ${acts_LIBRARY} ActsCore - #${AWKWARD_LIBRARIES} - ${LIBAWKWARD} - ${CPU-KERNELS} - ${LIBDL} ) diff --git a/analyzers/dataframe/src/Algorithms.cc b/analyzers/dataframe/src/Algorithms.cc index 1698f658a6..9d8f6ceda8 100644 --- a/analyzers/dataframe/src/Algorithms.cc +++ b/analyzers/dataframe/src/Algorithms.cc @@ -1,5 +1,6 @@ #include "FCCAnalyses/Algorithms.h" #include "FCCAnalyses/Utils.h" + #include "Math/Minimizer.h" #include "Math/IFunction.h" #include "Math/Factory.h" @@ -456,6 +457,78 @@ float getAxisCosTheta(const ROOT::VecOps::RVec & axis, return result; } + + +jets_TwoHemispheres::jets_TwoHemispheres(int arg_sorted, int arg_recombination) : m_sorted( arg_sorted ), m_recombination( arg_recombination ) { + if ( arg_recombination != 0 ) { + std::cout << " ....... in jets_TwoHemispheres: only E-scheme is implemented so far. m_recombination is set to zero. 
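// Usage sketch, not part of this file: how the jets_TwoHemispheres functor introduced
// here (constructor above, operator() just below) could be called. The RVec element
// type float is assumed, since template arguments are not visible in this rendering
// of the diff; the inputs would typically be per-particle px/py/pz/E/cos(theta).
#include "ROOT/RVec.hxx"
// assumes the FCCAnalyses headers declaring Algorithms::jets_TwoHemispheres and
// JetClustering::FCCAnalysesJet are on the include path
void exampleHemispheres(const ROOT::VecOps::RVec<float> &px,
                        const ROOT::VecOps::RVec<float> &py,
                        const ROOT::VecOps::RVec<float> &pz,
                        const ROOT::VecOps::RVec<float> &e,
                        const ROOT::VecOps::RVec<float> &costheta) {
  // sorted = 1: order the two hemisphere jets by energy; recombination = 0: E-scheme
  FCCAnalyses::Algorithms::jets_TwoHemispheres makeHemispheres(1, 0);
  JetClustering::FCCAnalysesJet hemis = makeHemispheres(px, py, pz, e, costheta);
  // hemis.jets[0] / hemis.jets[1]: leading / subleading hemisphere as PseudoJets;
  // hemis.constituents[i]: indices of the input particles summed into hemisphere i.
  (void)hemis;
}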
" << std::endl; + m_recombination = 0; + } +}; + +JetClustering::FCCAnalysesJet jets_TwoHemispheres::operator() ( + const ROOT::VecOps::RVec & RP_px, + const ROOT::VecOps::RVec & RP_py, + const ROOT::VecOps::RVec & RP_pz, + const ROOT::VecOps::RVec & RP_e, + const ROOT::VecOps::RVec & RP_costheta ) { + + JetClustering::FCCAnalysesJet result; + + std::vector constituents_JetPlus; + std::vector constituents_JetMimus; + + float px_plus=0; + float py_plus=0; + float pz_plus=0; + float e_plus=0; + float px_minus=0; + float py_minus=0; + float pz_minus=0; + float e_minus=0; + + for ( int i=0; i < RP_costheta.size(); i++) { + if ( RP_costheta[i] > 0 ) { + constituents_JetPlus.push_back( i ); + px_plus += RP_px[i]; + py_plus += RP_py[i]; + pz_plus += RP_pz[i]; + e_plus += RP_e[i]; + } + else { + constituents_JetMimus.push_back( i ); + px_minus += RP_px[i]; + py_minus += RP_py[i]; + pz_minus += RP_pz[i]; + e_minus += RP_e[i]; + } + } + + + float pt_plus = sqrt( pow( px_plus,2) + pow( py_plus,2) + pow( pz_plus, 2) ) ; + float pt_minus = sqrt( pow( px_minus, 2) + pow( py_minus, 2) + pow( pz_minus, 2) ) ; + + bool plus_is_first = ( ( m_sorted == 1 && e_plus > e_minus ) || ( m_sorted == 0 && pt_plus > pt_minus ) ) ; + + // sorting : + if ( plus_is_first ) { + result.jets.push_back( fastjet::PseudoJet( px_plus, py_plus, pz_plus, e_plus) ); + result.jets.push_back( fastjet::PseudoJet( px_minus, py_minus, pz_minus, e_minus) ); + result.constituents.push_back( constituents_JetPlus ); + result.constituents.push_back( constituents_JetMimus ); + } + else { + result.jets.push_back( fastjet::PseudoJet( px_minus, py_minus, pz_minus, e_minus) ); + result.jets.push_back( fastjet::PseudoJet( px_plus, py_plus, pz_plus, e_plus) ); + result.constituents.push_back( constituents_JetMimus ); + result.constituents.push_back( constituents_JetPlus ); + } + + + return result; +} + + }//end NS Algorithms }//end NS FCCAnalyses diff --git a/analyzers/dataframe/src/JetClusteringUtils.cc b/analyzers/dataframe/src/JetClusteringUtils.cc index dc16c2736e..26ac9010f1 100644 --- a/analyzers/dataframe/src/JetClusteringUtils.cc +++ b/analyzers/dataframe/src/JetClusteringUtils.cc @@ -1,246 +1,342 @@ #include "FCCAnalyses/JetClusteringUtils.h" +#include "TLorentzVector.h" namespace FCCAnalyses { - namespace JetClusteringUtils { - - ROOT::VecOps::RVec get_pseudoJets(const JetClustering::FCCAnalysesJet& jets) { - return jets.jets; - } - - std::vector> get_constituents(const JetClustering::FCCAnalysesJet& jets) { - return jets.constituents; - } - - float get_exclusive_dmerge(const JetClustering::FCCAnalysesJet& in, int n) { - float d = -1; - if (n >= 1 && n <= Nmax_dmerge) - d = in.exclusive_dmerge[n - 1]; - return d; - } - - float get_exclusive_dmerge_max(const JetClustering::FCCAnalysesJet& in, int n) { - float d = -1; - if (n >= 1 && n <= Nmax_dmerge) - d = in.exclusive_dmerge_max[n - 1]; - return d; - } - - std::vector set_pseudoJets(const ROOT::VecOps::RVec& px, - const ROOT::VecOps::RVec& py, - const ROOT::VecOps::RVec& pz, - const ROOT::VecOps::RVec& e) { - std::vector result; - unsigned index = 0; - for (size_t i = 0; i < px.size(); ++i) { - result.emplace_back(px[i], py[i], pz[i], e[i]); - result.back().set_user_index(index); - ++index; - } - return result; - } - - std::vector set_pseudoJets_xyzm(const ROOT::VecOps::RVec& px, - const ROOT::VecOps::RVec& py, - const ROOT::VecOps::RVec& pz, - const ROOT::VecOps::RVec& m) { - std::vector result; - unsigned index = 0; - for (size_t i = 0; i < px.size(); ++i) { - double px_d = px[i]; - 
double py_d = py[i]; - double pz_d = pz[i]; - double m_d = m[i]; - double E_d = sqrt(px_d * px_d + py_d * py_d + pz_d * pz_d + m_d * m_d); - result.emplace_back(px_d, py_d, pz_d, E_d); - result.back().set_user_index(index); - ++index; - } - return result; - } - - ROOT::VecOps::RVec get_px(const ROOT::VecOps::RVec& in) { - ROOT::VecOps::RVec result; - for (auto& p : in) { - result.push_back(p.px()); - } - return result; - } - - ROOT::VecOps::RVec get_py(const ROOT::VecOps::RVec& in) { - ROOT::VecOps::RVec result; - for (auto& p : in) { - result.push_back(p.py()); - } - return result; - } - - ROOT::VecOps::RVec get_pz(const ROOT::VecOps::RVec& in) { - ROOT::VecOps::RVec result; - for (auto& p : in) { - result.push_back(p.pz()); - } - return result; - } - - ROOT::VecOps::RVec get_e(const ROOT::VecOps::RVec& in) { - ROOT::VecOps::RVec result; - for (auto& p : in) { - result.push_back(p.E()); - } - return result; - } - - ROOT::VecOps::RVec get_pt(const ROOT::VecOps::RVec& in) { - ROOT::VecOps::RVec result; - for (auto& p : in) { - result.push_back(p.pt()); - } - return result; - } - - ROOT::VecOps::RVec get_m(const ROOT::VecOps::RVec& in) { - ROOT::VecOps::RVec result; - for (auto& p : in) { - result.push_back(p.m()); - } - return result; - } - - ROOT::VecOps::RVec get_eta(const ROOT::VecOps::RVec& in) { - ROOT::VecOps::RVec result; - for (auto& p : in) { - result.push_back(p.eta()); - } - return result; +namespace JetClusteringUtils { + +ROOT::VecOps::RVec +get_pseudoJets(const JetClustering::FCCAnalysesJet &jets) { + return jets.jets; +} + +std::vector> +get_constituents(const JetClustering::FCCAnalysesJet &jets) { + return jets.constituents; +} + +float get_exclusive_dmerge(const JetClustering::FCCAnalysesJet &in, int n) { + float d = -1; + if (n >= 1 && n <= Nmax_dmerge && in.exclusive_dmerge.size() > n - 1) + d = in.exclusive_dmerge[n - 1]; + return d; +} + +float get_exclusive_dmerge_max(const JetClustering::FCCAnalysesJet &in, int n) { + float d = -1; + if (n >= 1 && n <= Nmax_dmerge && in.exclusive_dmerge.size() > n - 1) + d = in.exclusive_dmerge_max[n - 1]; + return d; +} + +std::vector set_pseudoJets( + const ROOT::VecOps::RVec &px, const ROOT::VecOps::RVec &py, + const ROOT::VecOps::RVec &pz, const ROOT::VecOps::RVec &e) { + std::vector result; + unsigned index = 0; + for (size_t i = 0; i < px.size(); ++i) { + result.emplace_back(px[i], py[i], pz[i], e[i]); + result.back().set_user_index(index); + ++index; + } + return result; +} + +std::vector set_pseudoJets_xyzm( + const ROOT::VecOps::RVec &px, const ROOT::VecOps::RVec &py, + const ROOT::VecOps::RVec &pz, const ROOT::VecOps::RVec &m) { + std::vector result; + unsigned index = 0; + for (size_t i = 0; i < px.size(); ++i) { + double px_d = px[i]; + double py_d = py[i]; + double pz_d = pz[i]; + double m_d = m[i]; + double E_d = sqrt(px_d * px_d + py_d * py_d + pz_d * pz_d + m_d * m_d); + result.emplace_back(px_d, py_d, pz_d, E_d); + result.back().set_user_index(index); + ++index; + } + return result; +} + +ROOT::VecOps::RVec +get_px(const ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(p.px()); + } + return result; +} + +ROOT::VecOps::RVec +get_py(const ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(p.py()); + } + return result; +} + +ROOT::VecOps::RVec +get_pz(const ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(p.pz()); + } + return result; +} + +ROOT::VecOps::RVec +get_e(const 
ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(p.E()); + } + return result; +} + +ROOT::VecOps::RVec +get_pt(const ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(p.pt()); + } + return result; +} + +ROOT::VecOps::RVec +get_p(const ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(sqrt(p.pt() * p.pt() + p.pz() * p.pz())); + } + return result; +} + +ROOT::VecOps::RVec +get_m(const ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(p.m()); + } + return result; +} + +ROOT::VecOps::RVec +get_eta(const ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(p.eta()); + } + return result; +} + +ROOT::VecOps::RVec +get_phi(const ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(p.phi()); + } + return result; +} + +ROOT::VecOps::RVec +get_phi_std(const ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(p.phi_std()); + } + return result; +} + +ROOT::VecOps::RVec +get_theta(const ROOT::VecOps::RVec &in) { + ROOT::VecOps::RVec result; + for (auto &p : in) { + result.push_back(p.theta()); + } + return result; +} + +sel_pt::sel_pt(float arg_min_pt) : m_min_pt(arg_min_pt){}; +ROOT::VecOps::RVec +sel_pt::operator()(ROOT::VecOps::RVec in) { + ROOT::VecOps::RVec result; + result.reserve(in.size()); + for (size_t i = 0; i < in.size(); ++i) { + auto &p = in[i]; + if (std::sqrt(std::pow(p.px(), 2) + std::pow(p.py(), 2)) > m_min_pt) { + result.emplace_back(p); } - - ROOT::VecOps::RVec get_phi(const ROOT::VecOps::RVec& in) { - ROOT::VecOps::RVec result; - for (auto& p : in) { - result.push_back(p.phi()); - } - return result; + } + return result; +} + +JetClustering::FCCAnalysesJet initialise_FCCAnalysesJet() { + JetClustering::FCCAnalysesJet result; + std::vector jets; + std::vector> constituents; + + result.jets = jets; + result.constituents = constituents; + + std::vector exclusive_dmerge; + std::vector exclusive_dmerge_max; + exclusive_dmerge.reserve(Nmax_dmerge); + exclusive_dmerge_max.reserve(Nmax_dmerge); + + result.exclusive_dmerge = exclusive_dmerge; + result.exclusive_dmerge_max = exclusive_dmerge_max; + + return result; +}; + +JetClustering::FCCAnalysesJet +build_FCCAnalysesJet(const std::vector &in, + const std::vector &dmerge, + const std::vector &dmerge_max) { + JetClustering::FCCAnalysesJet result = initialise_FCCAnalysesJet(); + for (const auto &pjet : in) { + result.jets.push_back(pjet); + + std::vector consts = pjet.constituents(); + std::vector tmpvec; + for (const auto &constituent : consts) { + tmpvec.push_back(constituent.user_index()); } - - ROOT::VecOps::RVec get_theta(const ROOT::VecOps::RVec& in) { - ROOT::VecOps::RVec result; - for (auto& p : in) { - result.push_back(p.theta()); - } - return result; - } - - JetClustering::FCCAnalysesJet initialise_FCCAnalysesJet() { - JetClustering::FCCAnalysesJet result; - std::vector jets; - std::vector> constituents; - - result.jets = jets; - result.constituents = constituents; - - std::vector exclusive_dmerge; - std::vector exclusive_dmerge_max; - exclusive_dmerge.reserve(Nmax_dmerge); - exclusive_dmerge_max.reserve(Nmax_dmerge); - - result.exclusive_dmerge = exclusive_dmerge; - result.exclusive_dmerge_max = exclusive_dmerge_max; - - return result; - }; - - JetClustering::FCCAnalysesJet build_FCCAnalysesJet(const std::vector& in, 
- const std::vector& dmerge, - const std::vector& dmerge_max) { - JetClustering::FCCAnalysesJet result = initialise_FCCAnalysesJet(); - for (const auto& pjet : in) { - result.jets.push_back(pjet); - - std::vector consts = pjet.constituents(); - std::vector tmpvec; - for (const auto& constituent : consts) { - tmpvec.push_back(constituent.user_index()); + result.constituents.push_back(tmpvec); + } + result.exclusive_dmerge = dmerge; + result.exclusive_dmerge_max = dmerge_max; + return result; +} + +std::vector +build_jets(fastjet::ClusterSequence &cs, int exclusive, float cut, int sorted) { + std::vector pjets; + + if (sorted == 0) { + if (exclusive == 0) + pjets = fastjet::sorted_by_pt(cs.inclusive_jets(cut)); + else if (exclusive == 1) + pjets = fastjet::sorted_by_pt(cs.exclusive_jets(cut)); + else if (exclusive == 2) + pjets = fastjet::sorted_by_pt(cs.exclusive_jets(int(cut))); + else if (exclusive == 3) + pjets = fastjet::sorted_by_pt(cs.exclusive_jets_up_to(int(cut))); + else if (exclusive == 4) + pjets = fastjet::sorted_by_pt(cs.exclusive_jets_ycut(cut)); + } else if (sorted == 1) { + if (exclusive == 0) + pjets = fastjet::sorted_by_E(cs.inclusive_jets(cut)); + else if (exclusive == 1) + pjets = fastjet::sorted_by_E(cs.exclusive_jets(cut)); + else if (exclusive == 2) + pjets = fastjet::sorted_by_E(cs.exclusive_jets(int(cut))); + else if (exclusive == 3) + pjets = fastjet::sorted_by_E(cs.exclusive_jets_up_to(int(cut))); + else if (exclusive == 4) + pjets = fastjet::sorted_by_E(cs.exclusive_jets_ycut(cut)); + } + return pjets; +} + +std::vector exclusive_dmerge(fastjet::ClusterSequence &cs, + int do_dmarge_max) { + const int Nmax = Nmax_dmerge; + std::vector result; + for (int i = 1; i <= Nmax; i++) { + float d; + const int j = i; + if (do_dmarge_max == 0) + d = cs.exclusive_dmerge(j); + else + d = cs.exclusive_dmerge_max(j); + result.push_back(d); + } + return result; +} + +bool check(unsigned int n, int exclusive, float cut) { + if (exclusive > 0 && n <= int(cut)) + return false; + return true; +} + +fastjet::RecombinationScheme recomb_scheme(int recombination) { + fastjet::RecombinationScheme recomb_scheme; + + if (recombination == 0) + recomb_scheme = fastjet::RecombinationScheme::E_scheme; + else if (recombination == 1) + recomb_scheme = fastjet::RecombinationScheme::pt_scheme; + else if (recombination == 2) + recomb_scheme = fastjet::RecombinationScheme::pt2_scheme; + else if (recombination == 3) + recomb_scheme = fastjet::RecombinationScheme::Et_scheme; + else if (recombination == 4) + recomb_scheme = fastjet::RecombinationScheme::Et2_scheme; + else if (recombination == 5) + recomb_scheme = fastjet::RecombinationScheme::BIpt_scheme; + else if (recombination == 6) + recomb_scheme = fastjet::RecombinationScheme::BIpt2_scheme; + else + recomb_scheme = fastjet::RecombinationScheme::external_scheme; + + return recomb_scheme; +} + +resonanceBuilder::resonanceBuilder(float arg_resonance_mass) { + m_resonance_mass = arg_resonance_mass; +} +ROOT::VecOps::RVec +resonanceBuilder::operator()(ROOT::VecOps::RVec legs) { + ROOT::VecOps::RVec result; + int n = legs.size(); + if (n > 1) { + ROOT::VecOps::RVec v(n); + std::fill(v.end() - 2, v.end(), true); + do { + TLorentzVector reso; + TLorentzVector reso_lv; + for (int i = 0; i < n; ++i) { + if (v[i]) { + TLorentzVector leg_lv; + leg_lv.SetXYZM(legs[i].px(), legs[i].py(), legs[i].pz(), legs[i].m()); + reso_lv += leg_lv; } - result.constituents.push_back(tmpvec); - } - result.exclusive_dmerge = dmerge; - result.exclusive_dmerge_max = dmerge_max; 
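// Illustrative sketch, not part of this file: the recoil mass computed by
// recoilBuilder (defined just below). At centre-of-mass energy sqrt(s) the initial
// state is taken as (0, 0, 0, sqrt(s)); subtracting the visible four-momentum and
// taking the invariant mass of the remainder gives the recoil mass.
#include "TLorentzVector.h"

double recoilMass(const TLorentzVector &visible, double sqrt_s) {
  TLorentzVector initial(0., 0., 0., sqrt_s);
  return (initial - visible).M();
}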
- return result; - } - - std::vector build_jets(fastjet::ClusterSequence& cs, int exclusive, float cut, int sorted) { - std::vector pjets; - - if (sorted == 0) { - if (exclusive == 0) - pjets = fastjet::sorted_by_pt(cs.inclusive_jets(cut)); - else if (exclusive == 1) - pjets = fastjet::sorted_by_pt(cs.exclusive_jets(cut)); - else if (exclusive == 2) - pjets = fastjet::sorted_by_pt(cs.exclusive_jets(int(cut))); - else if (exclusive == 3) - pjets = fastjet::sorted_by_pt(cs.exclusive_jets_up_to(int(cut))); - else if (exclusive == 4) - pjets = fastjet::sorted_by_pt(cs.exclusive_jets_ycut(cut)); - } else if (sorted == 1) { - if (exclusive == 0) - pjets = fastjet::sorted_by_E(cs.inclusive_jets(cut)); - else if (exclusive == 1) - pjets = fastjet::sorted_by_E(cs.exclusive_jets(cut)); - else if (exclusive == 2) - pjets = fastjet::sorted_by_E(cs.exclusive_jets(int(cut))); - else if (exclusive == 3) - pjets = fastjet::sorted_by_E(cs.exclusive_jets_up_to(int(cut))); - else if (exclusive == 4) - pjets = fastjet::sorted_by_E(cs.exclusive_jets_ycut(cut)); } - return pjets; - } - - std::vector exclusive_dmerge(fastjet::ClusterSequence& cs, int do_dmarge_max) { - const int Nmax = Nmax_dmerge; - std::vector result; - for (int i = 1; i <= Nmax; i++) { - float d; - const int j = i; - if (do_dmarge_max == 0) - d = cs.exclusive_dmerge(j); - else - d = cs.exclusive_dmerge_max(j); - result.push_back(d); - } - return result; - } - - bool check(unsigned int n, int exclusive, float cut) { - if (exclusive > 0 && n <= int(cut)) - return false; - return true; - } - - fastjet::RecombinationScheme recomb_scheme(int recombination) { - fastjet::RecombinationScheme recomb_scheme; - - if (recombination == 0) - recomb_scheme = fastjet::RecombinationScheme::E_scheme; - else if (recombination == 1) - recomb_scheme = fastjet::RecombinationScheme::pt_scheme; - else if (recombination == 2) - recomb_scheme = fastjet::RecombinationScheme::pt2_scheme; - else if (recombination == 3) - recomb_scheme = fastjet::RecombinationScheme::Et_scheme; - else if (recombination == 4) - recomb_scheme = fastjet::RecombinationScheme::Et2_scheme; - else if (recombination == 5) - recomb_scheme = fastjet::RecombinationScheme::BIpt_scheme; - else if (recombination == 6) - recomb_scheme = fastjet::RecombinationScheme::BIpt2_scheme; - else - recomb_scheme = fastjet::RecombinationScheme::external_scheme; - - return recomb_scheme; - } - - } // namespace JetClusteringUtils - -} // namespace FCCAnalyses + result.emplace_back(reso_lv); + } while (std::next_permutation(v.begin(), v.end())); + } + if (result.size() > 1) { + auto resonancesort = [&](fastjet::PseudoJet i, fastjet::PseudoJet j) { + return (abs(m_resonance_mass - i.m()) < abs(m_resonance_mass - j.m())); + }; + std::sort(result.begin(), result.end(), resonancesort); + ROOT::VecOps::RVec::const_iterator first = + result.begin(); + ROOT::VecOps::RVec::const_iterator last = + result.begin() + 1; + ROOT::VecOps::RVec onlyBestReso(first, last); + return onlyBestReso; + } else { + return result; + } +} +recoilBuilder::recoilBuilder(float arg_sqrts) : m_sqrts(arg_sqrts){}; +double recoilBuilder::operator()(ROOT::VecOps::RVec in) { + double result; + auto recoil_p4 = TLorentzVector(0, 0, 0, m_sqrts); + for (auto &v1 : in) { + TLorentzVector tv1; + tv1.SetPxPyPzE(v1.px(), v1.py(), v1.pz(), v1.e()); + recoil_p4 -= tv1; + } + result = recoil_p4.M(); + return result; +} + +} // namespace JetClusteringUtils + +} // namespace FCCAnalyses diff --git a/analyzers/dataframe/src/JetConstituentsUtils.cc 
b/analyzers/dataframe/src/JetConstituentsUtils.cc index ab0e8d131c..0fead9fa72 100644 --- a/analyzers/dataframe/src/JetConstituentsUtils.cc +++ b/analyzers/dataframe/src/JetConstituentsUtils.cc @@ -6,12 +6,12 @@ #include "edm4hep/Track.h" #include "edm4hep/TrackerHitData.h" #include "edm4hep/TrackData.h" +#include "edm4hep/Cluster.h" +#include "edm4hep/ClusterData.h" +#include "edm4hep/CalorimeterHitData.h" #include "edm4hep/ReconstructedParticleData.h" - - - #include "FCCAnalyses/JetClusteringUtils.h" -//#include "FCCAnalyses/ExternalRecombiner.h" +// #include "FCCAnalyses/ExternalRecombiner.h" #include "fastjet/JetDefinition.hh" #include "fastjet/PseudoJet.hh" #include "fastjet/Selector.hh" @@ -23,45 +23,54 @@ ************************ */ -namespace FCCAnalyses { - namespace JetConstituentsUtils { - rv::RVec build_constituents(const rv::RVec& jets, - const rv::RVec& rps) { +namespace FCCAnalyses +{ + namespace JetConstituentsUtils + { + rv::RVec build_constituents(const rv::RVec &jets, + const rv::RVec &rps) + { rv::RVec jcs; - for (const auto& jet : jets) { - auto& jc = jcs.emplace_back(); - float energy_jet = jet.energy; - float energy_const = 0; - for (auto it = jet.particles_begin; it < jet.particles_end; ++it) { + for (const auto &jet : jets) + { + auto &jc = jcs.emplace_back(); + float energy_jet = jet.energy; + float energy_const = 0; + for (auto it = jet.particles_begin; it < jet.particles_end; ++it) + { jc.emplace_back(rps.at(it)); - energy_const += rps.at(it).energy; - } + energy_const += rps.at(it).energy; + } } return jcs; } - rv::RVec build_constituents_cluster(const rv::RVec& rps, - const std::vector>& indices) { + rv::RVec build_constituents_cluster(const rv::RVec &rps, + const std::vector> &indices) + { rv::RVec jcs; - for (const auto& jet_index : indices) { - FCCAnalysesJetConstituents jc; - for(const auto& const_index : jet_index) { - jc.push_back(rps.at(const_index)); - } - jcs.push_back(jc); + for (const auto &jet_index : indices) + { + FCCAnalysesJetConstituents jc; + for (const auto &const_index : jet_index) + { + jc.push_back(rps.at(const_index)); + } + jcs.push_back(jc); } return jcs; } - - FCCAnalysesJetConstituents get_jet_constituents(const rv::RVec& csts, int jet) { + FCCAnalysesJetConstituents get_jet_constituents(const rv::RVec &csts, int jet) + { if (jet < 0) return FCCAnalysesJetConstituents(); return csts.at(jet); } - rv::RVec get_constituents(const rv::RVec& csts, - const rv::RVec& jets) { + rv::RVec get_constituents(const rv::RVec &csts, + const rv::RVec &jets) + { rv::RVec jcs; for (size_t i = 0; i < jets.size(); ++i) if (jets.at(i) >= 0) @@ -72,298 +81,364 @@ namespace FCCAnalyses { /// recasting helper for jet constituents methods /// \param[in] jcs collection of jets constituents /// \param[in] meth variables retrieval method for constituents - auto cast_constituent = [](const auto& jcs, auto&& meth) { + auto cast_constituent = [](const auto &jcs, auto &&meth) + { rv::RVec out; - for (const auto& jc : jcs) + for (const auto &jc : jcs) out.emplace_back(meth(jc)); return out; }; - /// This function simply applies the 2 args functions per vector of Rec Particles to a vector of vectors of Rec Particles - auto cast_constituent_2 = [](const auto& jcs, const auto& coll, auto&& meth) { + auto cast_constituent_2 = [](const auto &jcs, const auto &coll, auto &&meth) + { rv::RVec out; - for (const auto& jc : jcs) { + for (const auto &jc : jcs) + { out.emplace_back(meth(jc, coll)); } return out; }; - auto cast_constituent_4 = [](const auto& jcs, const auto& 
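// Illustrative sketch, not part of this file: the cast_constituent* helpers above are
// plain maps over the outer jet index, applying a per-jet function to each jet's
// constituent collection and gathering the results. In isolation (element type float
// chosen for the example) the pattern is:
#include <type_traits>
#include "ROOT/RVec.hxx"

template <typename Func>
auto mapOverJets(const ROOT::VecOps::RVec<ROOT::VecOps::RVec<float>> &perJetInputs,
                 Func &&perJetFunc) {
  ROOT::VecOps::RVec<std::decay_t<decltype(perJetFunc(perJetInputs[0]))>> out;
  for (const auto &jc : perJetInputs)
    out.emplace_back(perJetFunc(jc));
  return out;
}
// e.g. mapOverJets(vals, [](const ROOT::VecOps::RVec<float> &v) { return ROOT::VecOps::Sum(v); });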
coll1, const auto& coll2, const auto& coll3,auto&& meth) { + auto cast_constituent_3 = [](const auto &jcs, const auto &coll1, const auto &coll2, auto &&meth) + { + rv::RVec out; + for (const auto &jc : jcs) + { + out.emplace_back(meth(jc, coll1, coll2)); + } + return out; + }; + + auto cast_constituent_4 = [](const auto &jcs, const auto &coll1, const auto &coll2, const auto &coll3, auto &&meth) + { rv::RVec out; - for (const auto& jc : jcs) { + for (const auto &jc : jcs) + { out.emplace_back(meth(jc, coll1, coll2, coll3)); } return out; }; - - - rv::RVec get_Bz(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_Bz(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_Bz); } - - - rv::RVec get_pt(const rv::RVec& jcs) { + + rv::RVec get_pt(const rv::RVec &jcs) + { return cast_constituent(jcs, ReconstructedParticle::get_pt); } - - rv::RVec get_e(const rv::RVec& jcs) { + + rv::RVec get_p(const rv::RVec &jcs) + { + return cast_constituent(jcs, ReconstructedParticle::get_p); + } + + rv::RVec get_e(const rv::RVec &jcs) + { return cast_constituent(jcs, ReconstructedParticle::get_e); } - - rv::RVec get_theta(const rv::RVec& jcs) { + + rv::RVec get_theta(const rv::RVec &jcs) + { return cast_constituent(jcs, ReconstructedParticle::get_theta); } - rv::RVec get_phi(const rv::RVec& jcs) { + rv::RVec get_phi(const rv::RVec &jcs) + { return cast_constituent(jcs, ReconstructedParticle::get_phi); } - rv::RVec get_type(const rv::RVec& jcs) { + rv::RVec get_type(const rv::RVec &jcs) + { return cast_constituent(jcs, ReconstructedParticle::get_type); } - rv::RVec get_charge(const rv::RVec& jcs) { + rv::RVec get_charge(const rv::RVec &jcs) + { return cast_constituent(jcs, ReconstructedParticle::get_charge); } - - //sorting - ROOT::VecOps::RVec jets_sorting_on_nconst(const rv::RVec& jets){ + // sorting + ROOT::VecOps::RVec jets_sorting_on_nconst(const rv::RVec &jets) + { ROOT::VecOps::RVec nconst; ROOT::VecOps::RVec out; - for (const auto& jet : jets) { - nconst.push_back(jet.particles_end - jet.particles_begin); + for (const auto &jet : jets) + { + nconst.push_back(jet.particles_end - jet.particles_begin); } auto indices = ROOT::VecOps::Argsort(nconst); - for (int index = 0; index < jets.size(); ++index) { - out.push_back( jets.at( indices.at(indices.size()-1-index) ) ); + for (int index = 0; index < jets.size(); ++index) + { + out.push_back(jets.at(indices.at(indices.size() - 1 - index))); } return out; } - ROOT::VecOps::RVec jets_sorting_on_energy(const rv::RVec& jets){ + ROOT::VecOps::RVec jets_sorting_on_energy(const rv::RVec &jets) + { ROOT::VecOps::RVec energy; ROOT::VecOps::RVec out; - for (const auto& jet : jets) { + for (const auto &jet : jets) + { energy.push_back(jet.energy); } auto indices = ROOT::VecOps::Argsort(energy); - for (int index = 0; index < jets.size(); ++index) { - out.push_back( jets.at( indices.at(indices.size()-1-index) ) ); + for (int index = 0; index < jets.size(); ++index) + { + out.push_back(jets.at(indices.at(indices.size() - 1 - index))); } return out; } - //displacement (wrt (0,0,0)) - rv::RVec get_d0(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + // displacement (wrt (0,0,0)) + rv::RVec get_d0(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_D0); } - - rv::RVec get_z0(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_z0(const rv::RVec &jcs, + 
const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_Z0); } - rv::RVec get_phi0(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_phi0(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_phi); } - rv::RVec get_omega(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_omega(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_omega); } - rv::RVec get_tanLambda(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_tanLambda(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_tanLambda); } - - rv::RVec XPtoPar_dxy(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks, - const TVector3& V, - const float& Bz) { - - return cast_constituent_4(jcs, tracks, V, Bz, ReconstructedParticle2Track::XPtoPar_dxy); + rv::RVec XPtoPar_dxy(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks, + const TLorentzVector &V, // primary vertex posotion and time in mm + const float &Bz) + { + + return cast_constituent_4(jcs, tracks, V, Bz, ReconstructedParticle2Track::XPtoPar_dxy); } - rv::RVec XPtoPar_dz(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks, - const TVector3& V, - const float& Bz) { + rv::RVec XPtoPar_dz(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks, + const TLorentzVector &V, // primary vertex posotion and time in mm + const float &Bz) + { return cast_constituent_4(jcs, tracks, V, Bz, ReconstructedParticle2Track::XPtoPar_dz); } - rv::RVec XPtoPar_phi(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks, - const TVector3& V, - const float& Bz) { + rv::RVec XPtoPar_phi(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks, + const TLorentzVector &V, // primary vertex posotion and time in mm + const float &Bz) + { return cast_constituent_4(jcs, tracks, V, Bz, ReconstructedParticle2Track::XPtoPar_phi); } - rv::RVec XPtoPar_C(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks, - const TVector3& V, - const float& Bz) { - - return cast_constituent_4(jcs, tracks, V, Bz, ReconstructedParticle2Track::XPtoPar_C); + rv::RVec XPtoPar_C(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks, + const float &Bz) + { + + return cast_constituent_3(jcs, tracks, Bz, ReconstructedParticle2Track::XPtoPar_C); } - rv::RVec XPtoPar_ct(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks, - const TVector3& V, - const float& Bz) { - - return cast_constituent_4(jcs, tracks, V, Bz, ReconstructedParticle2Track::XPtoPar_ct); + rv::RVec XPtoPar_ct(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks, + const float &Bz) + { + + return cast_constituent_3(jcs, tracks, Bz, ReconstructedParticle2Track::XPtoPar_ct); } - - //Covariance matrix elements of tracks parameters - //diagonal - rv::RVec get_omega_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + // Covariance matrix elements of tracks parameters + // diagonal + rv::RVec get_omega_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_omega_cov); } - rv::RVec get_d0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_d0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, 
ReconstructedParticle2Track::getRP2TRK_D0_cov); } - - rv::RVec get_z0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_z0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_Z0_cov); } - rv::RVec get_phi0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_phi0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_phi_cov); } - rv::RVec get_tanlambda_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_tanlambda_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_tanLambda_cov); } - //off-diagonal - rv::RVec get_d0_z0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + // off-diagonal + rv::RVec get_d0_z0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_d0_z0_cov); } - - rv::RVec get_phi0_d0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_phi0_d0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_d0_phi0_cov); } - - rv::RVec get_phi0_z0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_phi0_z0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_phi0_z0_cov); } - - rv::RVec get_tanlambda_phi0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_tanlambda_phi0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_phi0_tanlambda_cov); } - - rv::RVec get_tanlambda_d0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_tanlambda_d0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_d0_tanlambda_cov); } - - rv::RVec get_tanlambda_z0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_tanlambda_z0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_z0_tanlambda_cov); } - - rv::RVec get_omega_tanlambda_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_omega_tanlambda_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_omega_tanlambda_cov); } - - rv::RVec get_omega_phi0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_omega_phi0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_phi0_omega_cov); } - - rv::RVec get_omega_d0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_omega_d0_cov(const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_d0_omega_cov); } - - rv::RVec get_omega_z0_cov(const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + + rv::RVec get_omega_z0_cov(const rv::RVec &jcs, + const 
ROOT::VecOps::RVec &tracks) + { return cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_omega_z0_cov); } - - //neutrals are set to 0; muons and electrons are also set to 0; - // only charged hads are considered (mtof used to disctriminate charged kaons and pions) - rv::RVec get_dndx(const rv::RVec& jcs, - const rv::RVec& dNdx, //ETrackFlow_2 - const rv::RVec& trackdata, //Eflowtrack - const rv::RVec JetsConstituents_isChargedHad) { + // neutrals are set to 0; muons and electrons are also set to 0; + // only charged hads are considered (mtof used to disctriminate charged kaons and pions) + rv::RVec get_dndx(const rv::RVec &jcs, + const rv::RVec &dNdx, // ETrackFlow_2 + const rv::RVec &trackdata, // Eflowtrack + const rv::RVec JetsConstituents_isChargedHad) + { rv::RVec out; - for(int i = 0; i < jcs.size(); ++i){ - FCCAnalysesJetConstituents ct = jcs.at(i); - FCCAnalysesJetConstituentsData isChargedHad = JetsConstituents_isChargedHad.at(i); - FCCAnalysesJetConstituentsData tmp; - for(int j = 0; j < ct.size(); ++j) { - if (ct.at(j).tracks_begin < trackdata.size() && (int)isChargedHad.at(j) == 1) { - tmp.push_back( dNdx.at( trackdata.at(ct.at(j).tracks_begin).dxQuantities_begin).value ); - } else { - tmp.push_back(0.); - } - } - out.push_back(tmp); + for (int i = 0; i < jcs.size(); ++i) + { + FCCAnalysesJetConstituents ct = jcs.at(i); + FCCAnalysesJetConstituentsData isChargedHad = JetsConstituents_isChargedHad.at(i); + FCCAnalysesJetConstituentsData tmp; + for (int j = 0; j < ct.size(); ++j) + { + if (ct.at(j).tracks_begin < trackdata.size() && (int)isChargedHad.at(j) == 1) + { + tmp.push_back(dNdx.at(trackdata.at(ct.at(j).tracks_begin).dxQuantities_begin).value / 1000.); + } + else + { + tmp.push_back(0.); + } + } + out.push_back(tmp); } return out; } - rv::RVec get_Sip2dVal(const rv::RVec& jets, - const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_Sip2dVal(const rv::RVec &jets, + const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { rv::RVec out; rv::RVec D0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_D0); rv::RVec phi0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_phi); - - - for(int i = 0; i < jets.size(); ++i){ - TVector2 p(jets[i].momentum.x, jets[i].momentum.y); - FCCAnalysesJetConstituentsData cprojs; - for (int j = 0; j < jcs[i].size(); ++j) { - if (D0.at(i).at(j) != -9) { - TVector2 d0( - D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)) , D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j)) ); - cprojs.push_back( TMath::Sign(1, d0*p) * fabs(D0.at(i).at(j)) ); - } else { - cprojs.push_back(-9.); - } - } - out.push_back(cprojs); + + for (int i = 0; i < jets.size(); ++i) + { + TVector2 p(jets[i].momentum.x, jets[i].momentum.y); + FCCAnalysesJetConstituentsData cprojs; + for (int j = 0; j < jcs[i].size(); ++j) + { + if (D0.at(i).at(j) != -9) + { + TVector2 d0(-D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)), D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j))); + cprojs.push_back(TMath::Sign(1, d0 * p) * fabs(D0.at(i).at(j))); + } + else + { + cprojs.push_back(-9.); + } + } + out.push_back(cprojs); } return out; } - rv::RVec get_Sip2dVal_cluster(const rv::RVec& jets, - const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_Sip2dVal_cluster(const rv::RVec &jets, + const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { rv::RVec out; rv::RVec D0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_D0); rv::RVec phi0 = cast_constituent_2(jcs, tracks, 
ReconstructedParticle2Track::getRP2TRK_phi); - for(int i = 0; i < jets.size(); ++i){ + for (int i = 0; i < jets.size(); ++i) + { TVector2 p(jets[i].px(), jets[i].py()); FCCAnalysesJetConstituentsData cprojs; - for (int j = 0; j < jcs[i].size(); ++j) { - if (D0.at(i).at(j) != -9) { - TVector2 d0( - D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)) , D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j)) ); - cprojs.push_back( TMath::Sign(1, d0*p) * fabs(D0.at(i).at(j)) ); - } else { + for (int j = 0; j < jcs[i].size(); ++j) + { + if (D0.at(i).at(j) != -9) + { + TVector2 d0(-D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)), D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j))); + cprojs.push_back(TMath::Sign(1, d0 * p) * fabs(D0.at(i).at(j))); + } + else + { cprojs.push_back(-9.); } } @@ -372,22 +447,26 @@ namespace FCCAnalyses { return out; } - - rv::RVec get_Sip2dVal_clusterV(const rv::RVec& jets, - const rv::RVec& D0, - const rv::RVec& phi0, - const TVector3& V, - const float Bz) { + rv::RVec get_Sip2dVal_clusterV(const rv::RVec &jets, + const rv::RVec &D0, + const rv::RVec &phi0, + const float Bz) + { rv::RVec out; - for(int i = 0; i < jets.size(); ++i){ + for (int i = 0; i < jets.size(); ++i) + { TVector2 p(jets[i].px(), jets[i].py()); FCCAnalysesJetConstituentsData cprojs; - for (int j = 0; j < D0[i].size(); ++j) { - if (D0.at(i).at(j) != -9) { - TVector2 d0( - D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)) , D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j)) ); - cprojs.push_back( TMath::Sign(1, d0*p) * fabs(D0.at(i).at(j)) ); - } else { + for (int j = 0; j < D0[i].size(); ++j) + { + if (D0.at(i).at(j) != -9) + { + TVector2 d0(-D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)), D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j))); + cprojs.push_back(TMath::Sign(1, d0 * p) * fabs(D0.at(i).at(j))); + } + else + { cprojs.push_back(-9.); } } @@ -396,67 +475,83 @@ namespace FCCAnalyses { return out; } - /// The functions get_Sip2dSig and get_Sip2dVal can be made independent; /// I passed to the former the result of the latter, avoiding the recomputation - rv::RVec get_Sip2dSig(const rv::RVec& Sip2dVals, - const rv::RVec& err2_D0) { + rv::RVec get_Sip2dSig(const rv::RVec &Sip2dVals, + const rv::RVec &err2_D0) + { rv::RVec out; - for(int i = 0; i < Sip2dVals.size(); ++i) { - FCCAnalysesJetConstituentsData s; - for(int j = 0; j < Sip2dVals.at(i).size(); ++j) { - if(err2_D0.at(i).at(j) > 0) { - s.push_back( Sip2dVals.at(i).at(j)/std::sqrt(err2_D0.at(i).at(j)) ); - } else { - s.push_back(-9); - } - } - out.push_back(s); + for (int i = 0; i < Sip2dVals.size(); ++i) + { + FCCAnalysesJetConstituentsData s; + for (int j = 0; j < Sip2dVals.at(i).size(); ++j) + { + if (err2_D0.at(i).at(j) > 0) + { + s.push_back(Sip2dVals.at(i).at(j) / std::sqrt(err2_D0.at(i).at(j))); + } + else + { + s.push_back(-9); + } + } + out.push_back(s); } return out; } - - rv::RVec get_Sip3dVal(const rv::RVec& jets, - const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_Sip3dVal(const rv::RVec &jets, + const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { rv::RVec out; rv::RVec D0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_D0); rv::RVec Z0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_Z0); rv::RVec phi0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_phi); - - for(int i = 0; i < jets.size(); ++i){ - TVector3 p(jets[i].momentum.x, jets[i].momentum.y, jets[i].momentum.z); - FCCAnalysesJetConstituentsData cprojs; - for (int j = 0; j < jcs[i].size(); ++j){ - 
if(D0.at(i).at(j) != -9) { - TVector3 d( - D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)) , D0.at(i).at(j)*TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j) ); - cprojs.push_back( TMath::Sign(1, d*p) * fabs( sqrt( D0.at(i).at(j)*D0.at(i).at(j) + Z0.at(i).at(j)*Z0.at(i).at(j) ) ) ); - } else { - cprojs.push_back(-9); - } - } - out.push_back(cprojs); - } + + for (int i = 0; i < jets.size(); ++i) + { + TVector3 p(jets[i].momentum.x, jets[i].momentum.y, jets[i].momentum.z); + FCCAnalysesJetConstituentsData cprojs; + for (int j = 0; j < jcs[i].size(); ++j) + { + if (D0.at(i).at(j) != -9) + { + TVector3 d(-D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)), D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j)); + cprojs.push_back(TMath::Sign(1, d * p) * fabs(sqrt(D0.at(i).at(j) * D0.at(i).at(j) + Z0.at(i).at(j) * Z0.at(i).at(j)))); + } + else + { + cprojs.push_back(-9); + } + } + out.push_back(cprojs); + } return out; } - rv::RVec get_Sip3dVal_cluster(const rv::RVec& jets, - const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_Sip3dVal_cluster(const rv::RVec &jets, + const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { rv::RVec out; rv::RVec D0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_D0); rv::RVec Z0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_Z0); rv::RVec phi0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_phi); - for(int i = 0; i < jets.size(); ++i){ + for (int i = 0; i < jets.size(); ++i) + { TVector3 p(jets[i].px(), jets[i].py(), jets[i].pz()); FCCAnalysesJetConstituentsData cprojs; - for (int j = 0; j < jcs[i].size(); ++j){ - if(D0.at(i).at(j) != -9) { - TVector3 d( - D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)) , D0.at(i).at(j)*TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j) ); - cprojs.push_back( TMath::Sign(1, d*p) * fabs( sqrt( D0.at(i).at(j)*D0.at(i).at(j) + Z0.at(i).at(j)*Z0.at(i).at(j) ) ) ); - } else { + for (int j = 0; j < jcs[i].size(); ++j) + { + if (D0.at(i).at(j) != -9) + { + TVector3 d(-D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)), D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j)); + cprojs.push_back(TMath::Sign(1, d * p) * fabs(sqrt(D0.at(i).at(j) * D0.at(i).at(j) + Z0.at(i).at(j) * Z0.at(i).at(j)))); + } + else + { cprojs.push_back(-9); } } @@ -465,23 +560,27 @@ namespace FCCAnalyses { return out; } - - rv::RVec get_Sip3dVal_clusterV(const rv::RVec& jets, - const rv::RVec& D0, - const rv::RVec& Z0, - const rv::RVec& phi0, - const TVector3& V, - const float Bz) { + rv::RVec get_Sip3dVal_clusterV(const rv::RVec &jets, + const rv::RVec &D0, + const rv::RVec &Z0, + const rv::RVec &phi0, + const float Bz) + { rv::RVec out; - for(int i = 0; i < jets.size(); ++i){ + for (int i = 0; i < jets.size(); ++i) + { TVector3 p(jets[i].px(), jets[i].py(), jets[i].pz()); FCCAnalysesJetConstituentsData cprojs; - for (int j = 0; j < D0[i].size(); ++j){ - if(D0.at(i).at(j) != -9) { - TVector3 d( - D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)) , D0.at(i).at(j)*TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j) ); - cprojs.push_back( TMath::Sign(1, d*p) * fabs( sqrt( D0.at(i).at(j)*D0.at(i).at(j) + Z0.at(i).at(j)*Z0.at(i).at(j) ) ) ); - } else { + for (int j = 0; j < D0[i].size(); ++j) + { + if (D0.at(i).at(j) != -9) + { + TVector3 d(-D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)), D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j)); + cprojs.push_back(TMath::Sign(1, d * p) * fabs(sqrt(D0.at(i).at(j) * D0.at(i).at(j) + Z0.at(i).at(j) * Z0.at(i).at(j)))); + } + else + 
{ cprojs.push_back(-9); } } @@ -490,73 +589,88 @@ namespace FCCAnalyses { return out; } - - rv::RVec get_Sip3dSig(const rv::RVec& Sip3dVals, - const rv::RVec& err2_D0, - const rv::RVec& err2_Z0) { + rv::RVec get_Sip3dSig(const rv::RVec &Sip3dVals, + const rv::RVec &err2_D0, + const rv::RVec &err2_Z0) + { rv::RVec out; - for(int i = 0; i < Sip3dVals.size(); ++i) { - FCCAnalysesJetConstituentsData s; - for(int j = 0; j < Sip3dVals.at(i).size(); ++j) { - if (err2_D0.at(i).at(j) > 0.) { - s.push_back( Sip3dVals.at(i).at(j)/sqrt( err2_D0.at(i).at(j) + err2_Z0.at(i).at(j) ) ); - } else { - s.push_back(-9); - } - } - out.push_back(s); + for (int i = 0; i < Sip3dVals.size(); ++i) + { + FCCAnalysesJetConstituentsData s; + for (int j = 0; j < Sip3dVals.at(i).size(); ++j) + { + if (err2_D0.at(i).at(j) > 0.) + { + s.push_back(Sip3dVals.at(i).at(j) / sqrt(err2_D0.at(i).at(j) + err2_Z0.at(i).at(j))); + } + else + { + s.push_back(-9); + } + } + out.push_back(s); } return out; } - - rv::RVec get_JetDistVal(const rv::RVec& jets, - const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_JetDistVal(const rv::RVec &jets, + const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { rv::RVec out; rv::RVec D0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_D0); rv::RVec Z0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_Z0); rv::RVec phi0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_phi); - for(int i = 0; i < jets.size(); ++i){ - FCCAnalysesJetConstituentsData tmp; - TVector3 p_jet(jets[i].momentum.x, jets[i].momentum.y, jets[i].momentum.z); - FCCAnalysesJetConstituents ct = jcs.at(i); - for(int j = 0; j < ct.size(); ++j) { - if (D0.at(i).at(j) != -9) { - TVector3 d( - D0.at(i).at(j)* TMath::Sin(phi0.at(i).at(j)) , D0.at(i).at(j)*TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j) ); - TVector3 p_ct(ct[j].momentum.x, ct[j].momentum.y, ct[j].momentum.z); - TVector3 r_jet(0.0, 0.0, 0.0); - TVector3 n = p_ct.Cross(p_jet).Unit(); //What if they are parallel? - tmp.push_back( n.Dot(d - r_jet) ); - } else { - tmp.push_back(-9); - } - } - out.push_back(tmp); + for (int i = 0; i < jets.size(); ++i) + { + FCCAnalysesJetConstituentsData tmp; + TVector3 p_jet(jets[i].momentum.x, jets[i].momentum.y, jets[i].momentum.z); + FCCAnalysesJetConstituents ct = jcs.at(i); + for (int j = 0; j < ct.size(); ++j) + { + if (D0.at(i).at(j) != -9) + { + TVector3 d(-D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)), D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j)); + TVector3 p_ct(ct[j].momentum.x, ct[j].momentum.y, ct[j].momentum.z); + TVector3 r_jet(0.0, 0.0, 0.0); + TVector3 n = p_ct.Cross(p_jet).Unit(); // What if they are parallel? 
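// Note on the question above: if p_ct and p_jet were exactly (anti)parallel, p_ct.Cross(p_jet) would be the null vector; in ROOT's implementation TVector3::Unit() leaves a null vector unchanged (it only rescales when Mag2() > 0), so n.Dot(d - r_jet) evaluates to 0 rather than being undefined. An explicit guard on p_ct.Cross(p_jet).Mag2() would make that fallback intentional rather than accidental.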
+ tmp.push_back(n.Dot(d - r_jet)); + } + else + { + tmp.push_back(-9); + } + } + out.push_back(tmp); } return out; } - - rv::RVec get_JetDistVal_cluster(const rv::RVec& jets, - const rv::RVec& jcs, - const ROOT::VecOps::RVec& tracks) { + rv::RVec get_JetDistVal_cluster(const rv::RVec &jets, + const rv::RVec &jcs, + const ROOT::VecOps::RVec &tracks) + { rv::RVec out; rv::RVec D0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_D0); rv::RVec Z0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_Z0); rv::RVec phi0 = cast_constituent_2(jcs, tracks, ReconstructedParticle2Track::getRP2TRK_phi); - for(int i = 0; i < jets.size(); ++i){ + for (int i = 0; i < jets.size(); ++i) + { FCCAnalysesJetConstituentsData tmp; TVector3 p_jet(jets[i].px(), jets[i].py(), jets[i].pz()); FCCAnalysesJetConstituents ct = jcs.at(i); - for(int j = 0; j < ct.size(); ++j) { - if (D0.at(i).at(j) != -9) { - TVector3 d( - D0.at(i).at(j)* TMath::Sin(phi0.at(i).at(j)) , D0.at(i).at(j)*TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j) ); + for (int j = 0; j < ct.size(); ++j) + { + if (D0.at(i).at(j) != -9) + { + TVector3 d(-D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)), D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j)); TVector3 p_ct(ct[j].momentum.x, ct[j].momentum.y, ct[j].momentum.z); TVector3 r_jet(0.0, 0.0, 0.0); - TVector3 n = p_ct.Cross(p_jet).Unit(); //What if they are parallel? - tmp.push_back( n.Dot(d - r_jet) ); - } else { + TVector3 n = p_ct.Cross(p_jet).Unit(); // What if they are parallel? + tmp.push_back(n.Dot(d - r_jet)); + } + else + { tmp.push_back(-9); } } @@ -565,27 +679,32 @@ namespace FCCAnalyses { return out; } - rv::RVec get_JetDistVal_clusterV(const rv::RVec& jets, - const rv::RVec& jcs, - const rv::RVec& D0, - const rv::RVec& Z0, - const rv::RVec& phi0, - const TVector3& V, - const float Bz) { + rv::RVec get_JetDistVal_clusterV(const rv::RVec &jets, + const rv::RVec &jcs, + const rv::RVec &D0, + const rv::RVec &Z0, + const rv::RVec &phi0, + const float Bz) + { rv::RVec out; - for(int i = 0; i < jets.size(); ++i){ + for (int i = 0; i < jets.size(); ++i) + { FCCAnalysesJetConstituentsData tmp; TVector3 p_jet(jets[i].px(), jets[i].py(), jets[i].pz()); FCCAnalysesJetConstituents ct = jcs.at(i); - for(int j = 0; j < ct.size(); ++j) { - if (D0.at(i).at(j) != -9) { - TVector3 d( - D0.at(i).at(j)* TMath::Sin(phi0.at(i).at(j)) , D0.at(i).at(j)*TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j) ); + for (int j = 0; j < ct.size(); ++j) + { + if (D0.at(i).at(j) != -9) + { + TVector3 d(-D0.at(i).at(j) * TMath::Sin(phi0.at(i).at(j)), D0.at(i).at(j) * TMath::Cos(phi0.at(i).at(j)), Z0.at(i).at(j)); TVector3 p_ct(ct[j].momentum.x, ct[j].momentum.y, ct[j].momentum.z); TVector3 r_jet(0.0, 0.0, 0.0); - TVector3 n = p_ct.Cross(p_jet).Unit(); //What if they are parallel? - tmp.push_back( n.Dot(d - r_jet) ); - } else { + TVector3 n = p_ct.Cross(p_jet).Unit(); // What if they are parallel? 
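The sign convention used by get_Sip2dVal, get_Sip3dVal and their _cluster/_clusterV variants above can be summarised in a small standalone helper. The snippet below is only an illustration of that convention and is not part of FCCAnalyses (the helper name, arguments and includes are invented for clarity): the transverse point of closest approach is taken at (-D0·sin(phi0), D0·cos(phi0)), and |D0| is given the sign of the projection of that point onto the jet direction.

#include "TVector2.h"
#include "TVector3.h"
#include "TMath.h"
#include <cmath>

// Signed 2D impact parameter: magnitude |d0|, sign from the projection of the
// transverse point of closest approach onto the jet axis.
float signedIP2D(float d0, float phi0, const TVector3 &jetMomentum) {
  TVector2 pca(-d0 * TMath::Sin(phi0), d0 * TMath::Cos(phi0)); // PCA in the x-y plane
  TVector2 jetPt(jetMomentum.X(), jetMomentum.Y());
  // TVector2::operator* is the scalar (dot) product, as in the code above.
  return TMath::Sign(1., pca * jetPt) * std::fabs(d0);
}

The 3D value used by get_Sip3dVal follows the same pattern, projecting (pca.X(), pca.Y(), Z0) onto the full jet momentum and using sqrt(D0*D0 + Z0*Z0) as the magnitude.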
+ tmp.push_back(n.Dot(d - r_jet)); + } + else + { tmp.push_back(-9); } } @@ -594,84 +713,151 @@ namespace FCCAnalyses { return out; } - - rv::RVec get_JetDistSig(const rv::RVec& JetDistVal, - const rv::RVec& err2_D0, - const rv::RVec& err2_Z0) { + rv::RVec get_JetDistSig(const rv::RVec &JetDistVal, + const rv::RVec &err2_D0, + const rv::RVec &err2_Z0) + { rv::RVec out; - for(int i = 0; i < JetDistVal.size(); ++i) { - FCCAnalysesJetConstituentsData tmp; - for(int j = 0; j < JetDistVal.at(i).size(); ++j) { - if (err2_D0.at(i).at(j) > 0) { - float err3d = std::sqrt(err2_D0.at(i).at(j) + err2_Z0.at(i).at(j)); - float jetdistsig = JetDistVal.at(i).at(j) / err3d; - tmp.push_back(jetdistsig); - } else { - tmp.push_back(-9.); - } - } - out.push_back(tmp); + for (int i = 0; i < JetDistVal.size(); ++i) + { + FCCAnalysesJetConstituentsData tmp; + for (int j = 0; j < JetDistVal.at(i).size(); ++j) + { + if (err2_D0.at(i).at(j) > 0) + { + float err3d = std::sqrt(err2_D0.at(i).at(j) + err2_Z0.at(i).at(j)); + float jetdistsig = JetDistVal.at(i).at(j) / err3d; + tmp.push_back(jetdistsig); + } + else + { + tmp.push_back(-9.); + } + } + out.push_back(tmp); } return out; } - - //we measure L, tof; mtof in GeV - //neutrals are set to 0; muons and electrons are set to their mass; - // only charged hads are considered (mtof used to disctriminate charged kaons and pions) - rv::RVec get_mtof(const rv::RVec& jcs, - const rv::RVec& track_L, - const rv::RVec& trackdata, - const rv::RVec& trackerhits, - const rv::RVec JetsConstituents_Pids) { + // we measure L, tof; mtof in GeV + // neutrals are set to 0; muons and electrons are set to their mass; + // only charged hads are considered (mtof used to disctriminate charged kaons and pions) + + // eventually will have to update this function to compute tof with respect to hard vertex + // reconstructed with a 4D algorithm + + // TODO: + // - extend MC vertex method to 4-vector to have time as well + // - recompute neutral L here using Vertex pos + // - check if approx possible for charged as well + // - use Tin from vertex + rv::RVec get_mtof(const rv::RVec &jcs, + const rv::RVec &track_L, + const rv::RVec &trackdata, + const rv::RVec &trackerhits, + const rv::RVec &gammadata, + const rv::RVec &nhdata, + const rv::RVec &calohits, + const TLorentzVector &V // primary vertex posotion and time in mm + ) + { rv::RVec out; - for(int i = 0; i < jcs.size(); ++i){ - FCCAnalysesJetConstituents ct = jcs.at(i); - FCCAnalysesJetConstituentsData pids = JetsConstituents_Pids.at(i); - FCCAnalysesJetConstituentsData tmp; - for(int j = 0; j < ct.size(); ++j) { - //if(ct.at(j).tracks_begin > 0 && ct.at(j).tracks_begin < trackdata.size()) { //??????????? CHECK!!! - if (ct.at(j).tracks_begin < trackdata.size()) { - if( abs(pids.at(j)) == 11) { - tmp.push_back(0.00051099895); - } else if (abs(pids.at(j)) == 13) { - tmp.push_back(105.65837); - } else { - float Tin = trackerhits.at(trackdata.at(ct.at(j).tracks_begin).trackerHits_begin).time; - float Tout = trackerhits.at(trackdata.at(ct.at(j).tracks_begin).trackerHits_end-1).time; //one track and two hits per recon. 
particle are assumed - float tof = (Tout - Tin); - float L = track_L.at(ct.at(j).tracks_begin) * 0.001; - //std::cout << "tof: " << tof << " - L: " << L << << std::endl; - float beta = L/(tof * 2.99792458e+8) ; - float p = std::sqrt( ct.at(j).momentum.x*ct.at(j).momentum.x + ct.at(j).momentum.y*ct.at(j).momentum.y + ct.at(j).momentum.z * ct.at(j).momentum.z ); - //std::cout << "tof: " << tof << " - L: " << L << " - beta: " << beta << " - momentum: " << p << " - mtof: " << p * std::sqrt(1/(beta*beta)-1) << std::endl; - if (beta < 1. && beta > 0.) { - tmp.push_back( p * std::sqrt(1/(beta*beta)-1) ); - } else { - tmp.push_back(0.13957039); - } - } - } else { - //float E = ct.at(j).energy; - //tmp.pushback( E * std::sqrt(1-beta*beta) ); - tmp.push_back(0.); - } - } - out.push_back(tmp); + for (int i = 0; i < jcs.size(); ++i) + { + FCCAnalysesJetConstituents ct = jcs.at(i); + FCCAnalysesJetConstituentsData tmp; + for (int j = 0; j < ct.size(); ++j) + { + if (ct.at(j).clusters_begin < nhdata.size() + gammadata.size()) + { + if (ct.at(j).type == 130) + { + // this assumes that in converter photons are filled first and nh after + float T = calohits.at(nhdata.at(ct.at(j).clusters_begin - gammadata.size()).hits_begin).time; + float X = calohits.at(nhdata.at(ct.at(j).clusters_begin - gammadata.size()).hits_begin).position.x; + float Y = calohits.at(nhdata.at(ct.at(j).clusters_begin - gammadata.size()).hits_begin).position.y; + float Z = calohits.at(nhdata.at(ct.at(j).clusters_begin - gammadata.size()).hits_begin).position.z; + + float tof = T; + // compute path length wrt to PV + float L = std::sqrt((X - V.X()) * (X - V.X()) + (Y - V.Y()) * (Y - V.Y()) + (Z - V.Z()) * (Z - V.Z())) * 0.001; + // std::cout << "tof n: " << T << " - L: " << L << std::endl; + float beta = L / (tof * 2.99792458e+8); + float E = ct.at(j).energy; + // std::cout << "tof: " << tof << " - L: " << L << " - beta: " << beta << " - energy: " << E <<" - true PID: "< 0.) + { + tmp.push_back(E * std::sqrt(1 - beta * beta)); + // std::cout << "mtof n:" << E * std::sqrt(1-beta*beta)<< std::endl; + } + else + { + // std::cout << "problem" << std::endl; + tmp.push_back((9.)); + } + } + else if (ct.at(j).type == 22) + { + tmp.push_back((0.)); + } + } + + if (ct.at(j).tracks_begin < trackdata.size()) + { + if (abs(ct.at(j).charge) > 0 and abs(ct.at(j).mass - 0.000510999) < 1.e-05) + { + tmp.push_back(0.000510999); + } + else if (abs(ct.at(j).charge) > 0 and abs(ct.at(j).mass - 0.105658) < 1.e-03) + { + tmp.push_back(0.105658); + } + else + { + + // this is the time of the track origin from MC + // float Tin = trackerhits.at(trackdata.at(ct.at(j).tracks_begin).trackerHits_begin).time; + + // time given by primary vertex + float Tin = V.T() * 1e-3 / 2.99792458e+8; + + float Tout = trackerhits.at(trackdata.at(ct.at(j).tracks_begin).trackerHits_end - 1).time; // one track and 3 hits per recon. particle are assumed + float tof = (Tout - Tin); + + // TODO: path length will have to be re-calculated from vertex position + float L = track_L.at(ct.at(j).tracks_begin) * 0.001; + // std::cout << "tof: " << tof << " - L: " << L << std::endl; + float beta = L / (tof * 2.99792458e+8); + float p = std::sqrt(ct.at(j).momentum.x * ct.at(j).momentum.x + ct.at(j).momentum.y * ct.at(j).momentum.y + ct.at(j).momentum.z * ct.at(j).momentum.z); + // std::cout << "tof: " << tof << " - L: " << L << " - beta: " << beta << " - momentum: " << p << " - mtof: " << p * std::sqrt(1/(beta*beta)-1) << std::endl; + if (beta < 1. && beta > 0.) 
+ { + tmp.push_back(p * std::sqrt(1 / (beta * beta) - 1)); + } + else + { + tmp.push_back(0.13957039); + } + } + } + } + out.push_back(tmp); } return out; } - - //kinematics const/jet - rv::RVec get_erel_log(const rv::RVec& jets, - const rv::RVec& jcs) { + // kinematics const/jet + rv::RVec get_erel_log(const rv::RVec &jets, + const rv::RVec &jcs) + { rv::RVec out; - for (size_t i = 0; i < jets.size(); ++i) { - auto& jet_csts = out.emplace_back(); + for (size_t i = 0; i < jets.size(); ++i) + { + auto &jet_csts = out.emplace_back(); float e_jet = jets.at(i).energy; auto csts = get_jet_constituents(jcs, i); - for (const auto& jc : csts) { + for (const auto &jc : csts) + { float val = (e_jet > 0.) ? jc.energy / e_jet : 1.; float erel_log = float(std::log10(val)); jet_csts.emplace_back(erel_log); @@ -680,14 +866,17 @@ namespace FCCAnalyses { return out; } - rv::RVec get_erel_log_cluster(const rv::RVec& jets, - const rv::RVec& jcs) { + rv::RVec get_erel_log_cluster(const rv::RVec &jets, + const rv::RVec &jcs) + { rv::RVec out; - for (size_t i = 0; i < jets.size(); ++i) { - auto& jet_csts = out.emplace_back(); + for (size_t i = 0; i < jets.size(); ++i) + { + auto &jet_csts = out.emplace_back(); float e_jet = jets.at(i).E(); auto csts = get_jet_constituents(jcs, i); - for (const auto& jc : csts) { + for (const auto &jc : csts) + { float val = (e_jet > 0.) ? jc.energy / e_jet : 1.; float erel_log = float(std::log10(val)); jet_csts.emplace_back(erel_log); @@ -696,15 +885,17 @@ namespace FCCAnalyses { return out; } - - rv::RVec get_erel(const rv::RVec& jets, - const rv::RVec& jcs) { + rv::RVec get_erel(const rv::RVec &jets, + const rv::RVec &jcs) + { rv::RVec out; - for (size_t i = 0; i < jets.size(); ++i) { - auto& jet_csts = out.emplace_back(); + for (size_t i = 0; i < jets.size(); ++i) + { + auto &jet_csts = out.emplace_back(); double e_jet = jets.at(i).energy; auto csts = get_jet_constituents(jcs, i); - for (const auto& jc : csts) { + for (const auto &jc : csts) + { float val = (e_jet > 0.) ? jc.energy / e_jet : 1.; float erel = val; jet_csts.emplace_back(erel); @@ -713,14 +904,17 @@ namespace FCCAnalyses { return out; } - rv::RVec get_erel_cluster(const rv::RVec& jets, - const rv::RVec& jcs) { + rv::RVec get_erel_cluster(const rv::RVec &jets, + const rv::RVec &jcs) + { rv::RVec out; - for (size_t i = 0; i < jets.size(); ++i) { - auto& jet_csts = out.emplace_back(); + for (size_t i = 0; i < jets.size(); ++i) + { + auto &jet_csts = out.emplace_back(); double e_jet = jets.at(i).E(); auto csts = get_jet_constituents(jcs, i); - for (const auto& jc : csts) { + for (const auto &jc : csts) + { float val = (e_jet > 0.) ? 
jc.energy / e_jet : 1.; float erel = val; jet_csts.emplace_back(erel); @@ -729,17 +923,20 @@ namespace FCCAnalyses { return out; } - rv::RVec get_thetarel(const rv::RVec& jets, - const rv::RVec& jcs) { + rv::RVec get_thetarel(const rv::RVec &jets, + const rv::RVec &jcs) + { rv::RVec out; - for (size_t i = 0; i < jets.size(); ++i) { - auto& jet_csts = out.emplace_back(); + for (size_t i = 0; i < jets.size(); ++i) + { + auto &jet_csts = out.emplace_back(); TLorentzVector tlv_jet; tlv_jet.SetXYZM(jets.at(i).momentum.x, jets.at(i).momentum.y, jets.at(i).momentum.z, jets.at(i).mass); float theta_jet = tlv_jet.Theta(); float phi_jet = tlv_jet.Phi(); auto csts = get_jet_constituents(jcs, i); - for (const auto& jc : csts) { + for (const auto &jc : csts) + { TLorentzVector tlv_const; tlv_const.SetXYZM(jc.momentum.x, jc.momentum.y, jc.momentum.z, jc.mass); TVector3 v_const = tlv_const.Vect(); @@ -752,19 +949,22 @@ namespace FCCAnalyses { return out; } - rv::RVec get_thetarel_cluster(const rv::RVec& jets, - const rv::RVec& jcs) { + rv::RVec get_thetarel_cluster(const rv::RVec &jets, + const rv::RVec &jcs) + { rv::RVec out; - for (size_t i = 0; i < jets.size(); ++i) { - auto& jet_csts = out.emplace_back(); + for (size_t i = 0; i < jets.size(); ++i) + { + auto &jet_csts = out.emplace_back(); TLorentzVector tlv_jet; - tlv_jet.SetXYZM(jets.at(i).px(), jets.at(i).py(), jets.at(i).pz(), jets.at(i).E()); + tlv_jet.SetXYZM(jets.at(i).px(), jets.at(i).py(), jets.at(i).pz(), jets.at(i).m()); float theta_jet = tlv_jet.Theta(); float phi_jet = tlv_jet.Phi(); auto csts = get_jet_constituents(jcs, i); - for (const auto& jc : csts) { + for (const auto &jc : csts) + { TLorentzVector tlv_const; - tlv_const.SetXYZM(jc.momentum.x, jc.momentum.y, jc.momentum.z, jc.energy); + tlv_const.SetXYZM(jc.momentum.x, jc.momentum.y, jc.momentum.z, jc.mass); TVector3 v_const = tlv_const.Vect(); v_const.RotateZ(-phi_jet); v_const.RotateY(-theta_jet); @@ -775,18 +975,20 @@ namespace FCCAnalyses { return out; } - - rv::RVec get_phirel(const rv::RVec& jets, - const rv::RVec& jcs) { + rv::RVec get_phirel(const rv::RVec &jets, + const rv::RVec &jcs) + { rv::RVec out; - for (size_t i = 0; i < jets.size(); ++i) { - auto& jet_csts = out.emplace_back(); + for (size_t i = 0; i < jets.size(); ++i) + { + auto &jet_csts = out.emplace_back(); TLorentzVector tlv_jet; tlv_jet.SetXYZM(jets.at(i).momentum.x, jets.at(i).momentum.y, jets.at(i).momentum.z, jets.at(i).mass); float theta_jet = tlv_jet.Theta(); float phi_jet = tlv_jet.Phi(); auto csts = get_jet_constituents(jcs, i); - for (const auto& jc : csts) { + for (const auto &jc : csts) + { TLorentzVector tlv_const; tlv_const.SetXYZM(jc.momentum.x, jc.momentum.y, jc.momentum.z, jc.mass); TVector3 v_const = tlv_const.Vect(); @@ -799,17 +1001,20 @@ namespace FCCAnalyses { return out; } - rv::RVec get_phirel_cluster(const rv::RVec& jets, - const rv::RVec& jcs) { + rv::RVec get_phirel_cluster(const rv::RVec &jets, + const rv::RVec &jcs) + { rv::RVec out; - for (size_t i = 0; i < jets.size(); ++i) { - auto& jet_csts = out.emplace_back(); + for (size_t i = 0; i < jets.size(); ++i) + { + auto &jet_csts = out.emplace_back(); TLorentzVector tlv_jet; - tlv_jet.SetXYZM(jets.at(i).px(), jets.at(i).py(), jets.at(i).pz(), jets.at(i).E()); + tlv_jet.SetXYZM(jets.at(i).px(), jets.at(i).py(), jets.at(i).pz(), jets.at(i).m()); float theta_jet = tlv_jet.Theta(); float phi_jet = tlv_jet.Phi(); auto csts = get_jet_constituents(jcs, i); - for (const auto& jc : csts) { + for (const auto &jc : csts) + { TLorentzVector 
tlv_const; tlv_const.SetXYZM(jc.momentum.x, jc.momentum.y, jc.momentum.z, jc.mass); TVector3 v_const = tlv_const.Vect(); @@ -822,40 +1027,44 @@ namespace FCCAnalyses { return out; } + // Identification - - //Identification - - rv::RVec get_PIDs(const ROOT::VecOps::RVec< int > recin, - const ROOT::VecOps::RVec< int > mcin, - const rv::RVec& RecPart, - const rv::RVec& Particle, - const rv::RVec& jets) { + rv::RVec get_PIDs(const ROOT::VecOps::RVec recin, + const ROOT::VecOps::RVec mcin, + const rv::RVec &RecPart, + const rv::RVec &Particle, + const rv::RVec &jets) + { rv::RVec out; FCCAnalysesJetConstituentsData PIDs = FCCAnalyses::ReconstructedParticle2MC::getRP2MC_pdg(recin, mcin, RecPart, Particle); - - for (const auto& jet : jets) { - FCCAnalysesJetConstituentsData tmp; - for (auto it = jet.particles_begin; it < jet.particles_end; ++it) { + + for (const auto &jet : jets) + { + FCCAnalysesJetConstituentsData tmp; + for (auto it = jet.particles_begin; it < jet.particles_end; ++it) + { tmp.push_back(PIDs.at(it)); - } - out.push_back(tmp); + } + out.push_back(tmp); } return out; } - rv::RVec get_PIDs_cluster(const ROOT::VecOps::RVec< int > recin, - const ROOT::VecOps::RVec< int > mcin, - //const rv::RVec& jcs, - const rv::RVec& RecPart, - const rv::RVec& Particle, - const std::vector>& indices) { + rv::RVec get_PIDs_cluster(const ROOT::VecOps::RVec recin, + const ROOT::VecOps::RVec mcin, + // const rv::RVec& jcs, + const rv::RVec &RecPart, + const rv::RVec &Particle, + const std::vector> &indices) + { rv::RVec out; FCCAnalysesJetConstituentsData PIDs = FCCAnalyses::ReconstructedParticle2MC::getRP2MC_pdg(recin, mcin, RecPart, Particle); - for (const auto& jet_index : indices) { + for (const auto &jet_index : indices) + { FCCAnalysesJetConstituentsData tmp; - for (const auto& const_index : jet_index) { + for (const auto &const_index : jet_index) + { tmp.push_back(PIDs.at(const_index)); } out.push_back(tmp); @@ -863,225 +1072,306 @@ namespace FCCAnalyses { return out; } + rv::RVec get_isEl(const rv::RVec &jcs) + { + rv::RVec out; + for (int i = 0; i < jcs.size(); ++i) + { + FCCAnalysesJetConstituentsData is_El; + FCCAnalysesJetConstituents ct = jcs.at(i); + for (int j = 0; j < ct.size(); ++j) + { + if (abs(ct.at(j).charge) > 0 and abs(ct.at(j).mass - 0.000510999) < 1.e-05) + { + is_El.push_back(1.); + } + else + { + is_El.push_back(0.); + } + } - rv::RVec get_isMu(const rv::RVec& PIDs) { - rv::RVec out; - for(int i = 0; i < PIDs.size(); ++i) { - FCCAnalysesJetConstituentsData is_Mu; - for (int j = 0; j < PIDs.at(i).size(); ++j) { - if ( abs(PIDs.at(i).at(j)) == 13) { - is_Mu.push_back(1.); - } else { - is_Mu.push_back(0.); - } - } - out.push_back(is_Mu); + out.push_back(is_El); } return out; } - - rv::RVec get_isEl(const rv::RVec& PIDs) { + rv::RVec get_isMu(const rv::RVec &jcs) + { rv::RVec out; - for(int i = 0; i < PIDs.size(); ++i) { - FCCAnalysesJetConstituentsData is_El; - FCCAnalysesJetConstituentsData pids = PIDs.at(i); - for (int j = 0; j < pids.size(); ++j) { - if ( abs(pids.at(j)) == 11) { - is_El.push_back(1.); - } else { - is_El.push_back(0.); - } - } - out.push_back(is_El); + for (int i = 0; i < jcs.size(); ++i) + { + FCCAnalysesJetConstituentsData is_Mu; + FCCAnalysesJetConstituents ct = jcs.at(i); + for (int j = 0; j < ct.size(); ++j) + { + if (abs(ct.at(j).charge) > 0 and abs(ct.at(j).mass - 0.105658) < 1.e-03) + { + is_Mu.push_back(1.); + } + else + { + is_Mu.push_back(0.); + } + } + + out.push_back(is_Mu); } return out; } - rv::RVec get_isChargedHad(const rv::RVec& PIDs, - 
const rv::RVec& jcs) { + rv::RVec get_isChargedHad(const rv::RVec &jcs) + { rv::RVec out; - for(int i = 0; i < PIDs.size(); ++i) { + for (int i = 0; i < jcs.size(); ++i) + { FCCAnalysesJetConstituentsData is_ChargedHad; FCCAnalysesJetConstituents ct = jcs.at(i); - FCCAnalysesJetConstituentsData pids = PIDs.at(i); - for (int j = 0; j < pids.size(); ++j) { - if (ct.at(j).charge != 0 && abs(pids.at(j)) != 11 && abs(pids.at(j)) != 13) { - is_ChargedHad.push_back(1.); - } else { - is_ChargedHad.push_back(0.); + for (int j = 0; j < ct.size(); ++j) + { + if (abs(ct.at(j).charge) > 0 and abs(ct.at(j).mass - 0.13957) < 1.e-03) + { + is_ChargedHad.push_back(1.); + } + else + { + is_ChargedHad.push_back(0.); } - } + } + out.push_back(is_ChargedHad); } return out; } - rv::RVec get_isGamma(const rv::RVec& PIDs) { + rv::RVec get_isNeutralHad(const rv::RVec &jcs) + { rv::RVec out; - for(int i = 0; i < PIDs.size(); ++i) { - FCCAnalysesJetConstituentsData is_Gamma; - FCCAnalysesJetConstituentsData pids = PIDs.at(i); - for (int j = 0; j < pids.size(); ++j) { - if ( abs(pids.at(j)) == 22) { - is_Gamma.push_back(1.); - } else { - is_Gamma.push_back(0.); + for (int i = 0; i < jcs.size(); ++i) + { + FCCAnalysesJetConstituentsData is_NeutralHad; + FCCAnalysesJetConstituents ct = jcs.at(i); + for (int j = 0; j < ct.size(); ++j) + { + if (ct.at(j).type == 130) + { + is_NeutralHad.push_back(1.); } + else + is_NeutralHad.push_back(0.); } - out.push_back(is_Gamma); + out.push_back(is_NeutralHad); } return out; } - rv::RVec get_isNeutralHad(const rv::RVec& PIDs, - const rv::RVec& jcs) { + rv::RVec get_isGamma(const rv::RVec &jcs) + { rv::RVec out; - for(int i = 0; i < PIDs.size(); ++i) { + for (int i = 0; i < jcs.size(); ++i) + { FCCAnalysesJetConstituentsData is_NeutralHad; FCCAnalysesJetConstituents ct = jcs.at(i); - FCCAnalysesJetConstituentsData pids = PIDs.at(i); - for (int j = 0; j < pids.size(); ++j) { - if (ct.at(j).charge == 0 && abs(pids.at(j)) != 22 ) - is_NeutralHad.push_back(1.); - else - is_NeutralHad.push_back(0.); - } - out.push_back(is_NeutralHad); + for (int j = 0; j < ct.size(); ++j) + { + if (ct.at(j).type == 22) + { + is_NeutralHad.push_back(1.); + } + else + is_NeutralHad.push_back(0.); + } + out.push_back(is_NeutralHad); } return out; } - - //countings - int count_jets(rv::RVec jets) { + // countings + int count_jets(rv::RVec jets) + { return jets.size(); } - - rv::RVec count_consts(rv::RVec jets) { + + rv::RVec count_consts(rv::RVec jets) + { rv::RVec out; - for(int i = 0; i < jets.size(); ++i) { - out.push_back(jets.at(i).size()); + for (int i = 0; i < jets.size(); ++i) + { + out.push_back(jets.at(i).size()); } return out; } - rv::RVec count_type(const rv::RVec& isType) { + rv::RVec count_type(const rv::RVec &isType) + { rv::RVec out; - for(int i = 0; i < isType.size(); ++i){ - int count = 0; - rv::RVec istype = isType.at(i); - for(int j = 0; j < istype.size(); ++j){ - if( (int)(istype.at(j)) == 1) count++; - } - out.push_back(count); + for (int i = 0; i < isType.size(); ++i) + { + int count = 0; + rv::RVec istype = isType.at(i); + for (int j = 0; j < istype.size(); ++j) + { + if ((int)(istype.at(j)) == 1) + count++; + } + out.push_back(count); } return out; } - //compute residues - rv::RVec compute_tlv_jets(const rv::RVec& jets) { + // compute residues + rv::RVec compute_tlv_jets(const rv::RVec &jets) + { rv::RVec out; - for(const auto& jet : jets) { - TLorentzVector tlv_jet; - tlv_jet.SetPxPyPzE(jet.px(), jet.py(), jet.pz(), jet.E()); - out.push_back(tlv_jet); + for (const auto &jet : jets) 
+ { + TLorentzVector tlv_jet; + tlv_jet.SetPxPyPzE(jet.px(), jet.py(), jet.pz(), jet.E()); + out.push_back(tlv_jet); } return out; } - rv::RVec sum_tlv_constituents(const rv::RVec& jets) { + rv::RVec sum_tlv_constituents(const rv::RVec &jets) + { rv::RVec out; - for (int i = 0; i < jets.size(); ++i) { - TLorentzVector sum_tlv; // initialized by (0., 0., 0., 0.) - FCCAnalysesJetConstituents jcs = jets.at(i); - for (const auto& jc : jcs) { - TLorentzVector tlv; - tlv.SetPxPyPzE(jc.momentum.x, jc.momentum.y, jc.momentum.z, jc.energy); - sum_tlv += tlv; - } - out.push_back(sum_tlv); + for (int i = 0; i < jets.size(); ++i) + { + TLorentzVector sum_tlv; // initialized by (0., 0., 0., 0.) + FCCAnalysesJetConstituents jcs = jets.at(i); + for (const auto &jc : jcs) + { + TLorentzVector tlv; + tlv.SetPxPyPzE(jc.momentum.x, jc.momentum.y, jc.momentum.z, jc.energy); + sum_tlv += tlv; + } + out.push_back(sum_tlv); } return out; } - - float InvariantMass(const TLorentzVector& tlv1, const TLorentzVector& tlv2) { + + float InvariantMass(const TLorentzVector &tlv1, const TLorentzVector &tlv2) + { float E = tlv1.E() + tlv2.E(); float px = tlv1.Px() + tlv2.Px(); float py = tlv1.Py() + tlv2.Py(); float pz = tlv1.Pz() + tlv2.Pz(); - return std::sqrt(E*E - px*px - py*py - pz*pz); + return std::sqrt(E * E - px * px - py * py - pz * pz); } + + rv::RVec all_invariant_masses(rv::RVec AllJets) { + + TLorentzVector tlv1; + TLorentzVector tlv2; + double E, px, py, pz; + double invmass; + + rv::RVec InvariantMasses; + + // For each jet, take its invariant mass with the remaining jets. Stop at last jet. + for(int i = 0; i < AllJets.size()-1; ++i) { + + tlv1 = AllJets.at(i); + + for(int j=i+1; j < AllJets.size(); ++j){ // go until end + tlv2 = AllJets.at(j); + E = tlv1.E() + tlv2.E(); + px = tlv1.Px() + tlv2.Px(); + py = tlv1.Py() + tlv2.Py(); + pz = tlv1.Pz() + tlv2.Pz(); + + invmass = std::sqrt(E*E - px*px - py*py - pz*pz); + InvariantMasses.push_back(invmass); + + } + } + + return InvariantMasses; + } + rv::RVec compute_residue_energy(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs) { + rv::RVec out; - for(int i = 0; i < tlv_jet.size(); ++i) { - float de = (sum_tlv_jcs.at(i).E() - tlv_jet.at(i).E())/tlv_jet.at(i).E(); - out.push_back(de); + for (int i = 0; i < tlv_jet.size(); ++i) + { + float de = (sum_tlv_jcs.at(i).E() - tlv_jet.at(i).E()) / tlv_jet.at(i).E(); + out.push_back(de); } return out; } - rv::RVec compute_residue_px(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs) { + rv::RVec compute_residue_px(const rv::RVec &tlv_jet, const rv::RVec &sum_tlv_jcs) + { rv::RVec out; - for(int i = 0; i < tlv_jet.size(); ++i) { - float dpx = (sum_tlv_jcs.at(i).Px() - tlv_jet.at(i).Px())/tlv_jet.at(i).Px(); + for (int i = 0; i < tlv_jet.size(); ++i) + { + float dpx = (sum_tlv_jcs.at(i).Px() - tlv_jet.at(i).Px()) / tlv_jet.at(i).Px(); out.push_back(dpx); } return out; } - rv::RVec compute_residue_py(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs) { + rv::RVec compute_residue_py(const rv::RVec &tlv_jet, const rv::RVec &sum_tlv_jcs) + { rv::RVec out; - for(int i = 0; i < tlv_jet.size(); ++i) { - float dpy = (sum_tlv_jcs.at(i).Py() - tlv_jet.at(i).Py())/tlv_jet.at(i).Py(); + for (int i = 0; i < tlv_jet.size(); ++i) + { + float dpy = (sum_tlv_jcs.at(i).Py() - tlv_jet.at(i).Py()) / tlv_jet.at(i).Py(); out.push_back(dpy); } return out; } - rv::RVec compute_residue_pz(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs) { + rv::RVec compute_residue_pz(const rv::RVec &tlv_jet, const rv::RVec &sum_tlv_jcs) + { rv::RVec 
out; - for(int i = 0; i < tlv_jet.size(); ++i) { - float dpz = (sum_tlv_jcs.at(i).Pz() - tlv_jet.at(i).Pz())/tlv_jet.at(i).Pz(); + for (int i = 0; i < tlv_jet.size(); ++i) + { + float dpz = (sum_tlv_jcs.at(i).Pz() - tlv_jet.at(i).Pz()) / tlv_jet.at(i).Pz(); out.push_back(dpz); } return out; } - rv::RVec compute_residue_pt(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs) { + rv::RVec compute_residue_pt(const rv::RVec &tlv_jet, const rv::RVec &sum_tlv_jcs) + { rv::RVec out; - for(int i = 0; i < tlv_jet.size(); ++i) { - double pt_jet = std::sqrt( tlv_jet.at(i).Px()*tlv_jet.at(i).Px() + tlv_jet.at(i).Py()*tlv_jet.at(i).Py() ); - double pt_jcs = std::sqrt( sum_tlv_jcs.at(i).Px()*sum_tlv_jcs.at(i).Px() + sum_tlv_jcs.at(i).Py()*sum_tlv_jcs.at(i).Py() ); - double dpt = ( pt_jcs - pt_jet)/pt_jet; - out.push_back(dpt); - } + for (int i = 0; i < tlv_jet.size(); ++i) + { + double pt_jet = std::sqrt(tlv_jet.at(i).Px() * tlv_jet.at(i).Px() + tlv_jet.at(i).Py() * tlv_jet.at(i).Py()); + double pt_jcs = std::sqrt(sum_tlv_jcs.at(i).Px() * sum_tlv_jcs.at(i).Px() + sum_tlv_jcs.at(i).Py() * sum_tlv_jcs.at(i).Py()); + double dpt = (pt_jcs - pt_jet) / pt_jet; + out.push_back(dpt); + } return out; } - rv::RVec compute_residue_phi(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs) { + rv::RVec compute_residue_phi(const rv::RVec &tlv_jet, const rv::RVec &sum_tlv_jcs) + { rv::RVec out; - for(int i = 0; i < tlv_jet.size(); ++i) { - double phi_jet = tlv_jet.at(i).Phi(); + for (int i = 0; i < tlv_jet.size(); ++i) + { + double phi_jet = tlv_jet.at(i).Phi(); double phi_jcs = sum_tlv_jcs.at(i).Phi(); - double dphi = (phi_jcs - phi_jet)/phi_jet; - out.push_back(dphi); + double dphi = (phi_jcs - phi_jet) / phi_jet; + out.push_back(dphi); } return out; } - rv::RVec compute_residue_theta(const rv::RVec& tlv_jet, const rv::RVec& sum_tlv_jcs) { + rv::RVec compute_residue_theta(const rv::RVec &tlv_jet, const rv::RVec &sum_tlv_jcs) + { rv::RVec out; - for(int i = 0; i < tlv_jet.size(); ++i) { - double theta_jet = tlv_jet.at(i).Theta(); - double theta_jcs = sum_tlv_jcs.at(i).Theta(); - double dtheta = (theta_jcs - theta_jet)/theta_jet; - out.push_back(dtheta); - } + for (int i = 0; i < tlv_jet.size(); ++i) + { + double theta_jet = tlv_jet.at(i).Theta(); + double theta_jcs = sum_tlv_jcs.at(i).Theta(); + double dtheta = (theta_jcs - theta_jet) / theta_jet; + out.push_back(dtheta); + } return out; } - - } // namespace JetConstituentsUtils -} // namespace FCCAnalyses + } // namespace JetConstituentsUtils +} // namespace FCCAnalyses diff --git a/analyzers/dataframe/src/JetFlavourUtils.cc b/analyzers/dataframe/src/JetFlavourUtils.cc index 9e0f2012f9..7a88909bf4 100644 --- a/analyzers/dataframe/src/JetFlavourUtils.cc +++ b/analyzers/dataframe/src/JetFlavourUtils.cc @@ -4,17 +4,23 @@ #include namespace FCCAnalyses { - std::unique_ptr gWeaver; + std::vector gWeavers; + bool isSetup = false; namespace JetFlavourUtils { void setup_weaver(const std::string& onnx_filename, const std::string& json_filename, - const rv::RVec& vars) { - gWeaver = std::make_unique(onnx_filename, json_filename, vars); + const rv::RVec& vars, + const unsigned int nSlots) { + for(unsigned int i=0; i > compute_weights(const rv::RVec& vars) { - if (!gWeaver) + rv::RVec > compute_weights(unsigned int slot, const rv::RVec& vars) { + if (!isSetup) throw std::runtime_error("Weaver interface is not initialised!"); rv::RVec > out; if (vars.empty()) // no variables registered @@ -33,7 +39,7 @@ namespace FCCAnalyses { 
constit_vars.push_back((float)vars.at(k).at(i).at(j)); jet_sc_vars.push_back(constit_vars); } - out.emplace_back(gWeaver->run(jet_sc_vars)); + out.emplace_back(gWeavers.at(slot)->run(jet_sc_vars)); } return out; } diff --git a/analyzers/dataframe/src/JetTaggingUtils.cc b/analyzers/dataframe/src/JetTaggingUtils.cc index 2600cd3d3f..7ed529c18a 100644 --- a/analyzers/dataframe/src/JetTaggingUtils.cc +++ b/analyzers/dataframe/src/JetTaggingUtils.cc @@ -1,148 +1,157 @@ -#include "FCCAnalyses/JetTaggingUtils.h" - -namespace FCCAnalyses{ - -namespace JetTaggingUtils{ - -ROOT::VecOps::RVec get_flavour(ROOT::VecOps::RVec in, - ROOT::VecOps::RVec MCin) -{ - ROOT::VecOps::RVec result(in.size(),0); - - int loopcount =0; - for (size_t i = 0; i < MCin.size(); ++i) { - auto & parton = MCin[i]; - //Select partons only (for pythia8 71-79, for pythia6 2): - if ((parton.generatorStatus>80 || - parton.generatorStatus<70) && - parton.generatorStatus != 2 ) continue; - if (std::abs(parton.PDG) > 5 && parton.PDG!=21) continue; - ROOT::Math::PxPyPzMVector lv(parton.momentum.x, parton.momentum.y, - parton.momentum.z, parton.mass); - - for (size_t j = 0; j < in.size(); ++j) { - auto & p = in[j]; - //float dEta = lv.Eta() - p.eta(); - //float dPhi = lv.Phi() - p.phi(); - //float deltaR = sqrt(dEta*dEta+dPhi*dPhi); - //if (deltaR <= 0.5 && gRandom->Uniform() <= efficiency) result[j] = true; - - Float_t dot = p.px() * parton.momentum.x - + p.py() * parton.momentum.y - + p.pz() * parton.momentum.z; - Float_t lenSq1 = p.px() * p.px() - + p.py() * p.py() - + p.pz() * p.pz(); - Float_t lenSq2 = parton.momentum.x * parton.momentum.x - + parton.momentum.y * parton.momentum.y - + parton.momentum.z * parton.momentum.z; - Float_t norm = sqrt(lenSq1*lenSq2); - Float_t angle = acos(dot/norm); - - if (angle <= 0.3) { - if (result[j]==21 or result[j]==0) { - // if no match before, or matched to gluon, match to - // this particle (favour quarks over gluons) - result[j] = std::abs ( parton.PDG ); - } - else if (parton.PDG!=21) { - // if matched to quark, and this is a quark, favour - // heavier flavours - result[j] = std::max(result[j], std::abs ( parton.PDG )); - } else { - // if matched to quark, and this is a gluon, keep - // previous result (favour quark) - ; - } - } - - - } - } - - return result; -} - -ROOT::VecOps::RVec -get_btag(ROOT::VecOps::RVec in, - float efficiency, float mistag_c, - float mistag_l, float mistag_g) { - - ROOT::VecOps::RVec result(in.size(),0); - - for (size_t j = 0; j < in.size(); ++j) { - if (in.at(j) == 5 && gRandom->Uniform() <= efficiency) result[j] = 1; - if (in.at(j) == 4 && gRandom->Uniform() <= mistag_c) result[j] = 1; - if (in.at(j) < 4 && gRandom->Uniform() <= mistag_l) result[j] = 1; - if (in.at(j) == 21 && gRandom->Uniform() <= mistag_g) result[j] = 1; - } - return result; -} - -ROOT::VecOps::RVec -get_ctag(ROOT::VecOps::RVec in, - float efficiency, float mistag_b, - float mistag_l, float mistag_g) { - - ROOT::VecOps::RVec result(in.size(),0); - - for (size_t j = 0; j < in.size(); ++j) { - if (in.at(j) == 4 && gRandom->Uniform() <= efficiency) result[j] = 1; - if (in.at(j) == 5 && gRandom->Uniform() <= mistag_b) result[j] = 1; - if (in.at(j) < 4 && gRandom->Uniform() <= mistag_l) result[j] = 1; - if (in.at(j) == 21 && gRandom->Uniform() <= mistag_g) result[j] = 1; - } - return result; -} - -ROOT::VecOps::RVec -get_ltag(ROOT::VecOps::RVec in, - float efficiency, float mistag_b, - float mistag_c, float mistag_g) { - - ROOT::VecOps::RVec result(in.size(),0); - - for (size_t j = 0; j < in.size(); 
++j) { - if (in.at(j) < 4 && gRandom->Uniform() <= efficiency) result[j] = 1; - if (in.at(j) == 5 && gRandom->Uniform() <= mistag_b) result[j] = 1; - if (in.at(j) == 4 && gRandom->Uniform() <= mistag_c) result[j] = 1; - if (in.at(j) == 21 && gRandom->Uniform() <= mistag_g) result[j] = 1; - } - return result; -} - -ROOT::VecOps::RVec -get_gtag(ROOT::VecOps::RVec in, - float efficiency, float mistag_b, - float mistag_c, float mistag_l) { - - ROOT::VecOps::RVec result(in.size(),0); - - for (size_t j = 0; j < in.size(); ++j) { - if (in.at(j) == 21 && gRandom->Uniform() <= efficiency) result[j] = 1; - if (in.at(j) == 5 && gRandom->Uniform() <= mistag_b) result[j] = 1; - if (in.at(j) == 4 && gRandom->Uniform() <= mistag_c) result[j] = 1; - if (in.at(j) < 4 && gRandom->Uniform() <= mistag_l) result[j] = 1; - } - return result; -} - -sel_tag::sel_tag(bool arg_pass): m_pass(arg_pass) {}; -ROOT::VecOps::RVec -sel_tag::operator()(ROOT::VecOps::RVec tags, - ROOT::VecOps::RVec in){ - ROOT::VecOps::RVec result; - for (size_t i = 0; i < in.size(); ++i) { - if (m_pass) { - if (tags.at(i)) result.push_back(in.at(i)); - } - else { - if (!tags.at(i)) result.push_back(in.at(i)); - } - } - return result; -} - -}//end NS JetTaggingUtils - -}//end NS FCCAnalyses +#include "FCCAnalyses/JetTaggingUtils.h" + +namespace FCCAnalyses { + +namespace JetTaggingUtils { + +ROOT::VecOps::RVec +get_flavour(ROOT::VecOps::RVec in, + ROOT::VecOps::RVec MCin) { + ROOT::VecOps::RVec result(in.size(), 0); + + int loopcount = 0; + for (size_t i = 0; i < MCin.size(); ++i) { + auto &parton = MCin[i]; + // Select partons only (for pythia8 71-79, for pythia6 2): + if ((parton.generatorStatus > 80 || parton.generatorStatus < 70) && + parton.generatorStatus != 2) + continue; + if (std::abs(parton.PDG) > 5 && parton.PDG != 21) + continue; + ROOT::Math::PxPyPzMVector lv(parton.momentum.x, parton.momentum.y, + parton.momentum.z, parton.mass); + + for (size_t j = 0; j < in.size(); ++j) { + auto &p = in[j]; + // float dEta = lv.Eta() - p.eta(); + // float dPhi = lv.Phi() - p.phi(); + // float deltaR = sqrt(dEta*dEta+dPhi*dPhi); + // if (deltaR <= 0.5 && gRandom->Uniform() <= efficiency) result[j] = + // true; + + Float_t dot = p.px() * parton.momentum.x + p.py() * parton.momentum.y + + p.pz() * parton.momentum.z; + Float_t lenSq1 = p.px() * p.px() + p.py() * p.py() + p.pz() * p.pz(); + Float_t lenSq2 = parton.momentum.x * parton.momentum.x + + parton.momentum.y * parton.momentum.y + + parton.momentum.z * parton.momentum.z; + Float_t norm = sqrt(lenSq1 * lenSq2); + Float_t angle = acos(dot / norm); + + if (angle <= 0.3) { + if (result[j] == 21 or result[j] == 0) { + // if no match before, or matched to gluon, match to + // this particle (favour quarks over gluons) + result[j] = std::abs(parton.PDG); + } else if (parton.PDG != 21) { + // if matched to quark, and this is a quark, favour + // heavier flavours + result[j] = std::max(result[j], std::abs(parton.PDG)); + } else { + // if matched to quark, and this is a gluon, keep + // previous result (favour quark) + ; + } + } + } + } + + return result; +} + +ROOT::VecOps::RVec get_btag(ROOT::VecOps::RVec in, float efficiency, + float mistag_c, float mistag_l, + float mistag_g) { + + ROOT::VecOps::RVec result(in.size(), 0); + + for (size_t j = 0; j < in.size(); ++j) { + if (in.at(j) == 5 && gRandom->Uniform() <= efficiency) + result[j] = 1; + if (in.at(j) == 4 && gRandom->Uniform() <= mistag_c) + result[j] = 1; + if (in.at(j) < 4 && gRandom->Uniform() <= mistag_l) + result[j] = 1; + if (in.at(j) == 
21 && gRandom->Uniform() <= mistag_g) + result[j] = 1; + } + return result; +} + +ROOT::VecOps::RVec get_ctag(ROOT::VecOps::RVec in, float efficiency, + float mistag_b, float mistag_l, + float mistag_g) { + + ROOT::VecOps::RVec result(in.size(), 0); + + for (size_t j = 0; j < in.size(); ++j) { + if (in.at(j) == 4 && gRandom->Uniform() <= efficiency) + result[j] = 1; + if (in.at(j) == 5 && gRandom->Uniform() <= mistag_b) + result[j] = 1; + if (in.at(j) < 4 && gRandom->Uniform() <= mistag_l) + result[j] = 1; + if (in.at(j) == 21 && gRandom->Uniform() <= mistag_g) + result[j] = 1; + } + return result; +} + +ROOT::VecOps::RVec get_ltag(ROOT::VecOps::RVec in, float efficiency, + float mistag_b, float mistag_c, + float mistag_g) { + + ROOT::VecOps::RVec result(in.size(), 0); + + for (size_t j = 0; j < in.size(); ++j) { + if (in.at(j) < 4 && gRandom->Uniform() <= efficiency) + result[j] = 1; + if (in.at(j) == 5 && gRandom->Uniform() <= mistag_b) + result[j] = 1; + if (in.at(j) == 4 && gRandom->Uniform() <= mistag_c) + result[j] = 1; + if (in.at(j) == 21 && gRandom->Uniform() <= mistag_g) + result[j] = 1; + } + return result; +} + +ROOT::VecOps::RVec get_gtag(ROOT::VecOps::RVec in, float efficiency, + float mistag_b, float mistag_c, + float mistag_l) { + + ROOT::VecOps::RVec result(in.size(), 0); + + for (size_t j = 0; j < in.size(); ++j) { + if (in.at(j) == 21 && gRandom->Uniform() <= efficiency) + result[j] = 1; + if (in.at(j) == 5 && gRandom->Uniform() <= mistag_b) + result[j] = 1; + if (in.at(j) == 4 && gRandom->Uniform() <= mistag_c) + result[j] = 1; + if (in.at(j) < 4 && gRandom->Uniform() <= mistag_l) + result[j] = 1; + } + return result; +} + +sel_tag::sel_tag(bool arg_pass) : m_pass(arg_pass){}; +ROOT::VecOps::RVec +sel_tag::operator()(ROOT::VecOps::RVec tags, + ROOT::VecOps::RVec in) { + ROOT::VecOps::RVec result; + for (size_t i = 0; i < in.size(); ++i) { + if (m_pass) { + if (tags.at(i)) + result.push_back(in.at(i)); + } else { + if (!tags.at(i)) + result.push_back(in.at(i)); + } + } + return result; +} + +} // namespace JetTaggingUtils + +} // namespace FCCAnalyses diff --git a/analyzers/dataframe/src/MCParticle.cc b/analyzers/dataframe/src/MCParticle.cc index db5c9f8499..a63b37480c 100644 --- a/analyzers/dataframe/src/MCParticle.cc +++ b/analyzers/dataframe/src/MCParticle.cc @@ -1,6 +1,7 @@ #include "FCCAnalyses/MCParticle.h" - #include +#include +#include namespace FCCAnalyses{ @@ -85,7 +86,9 @@ bool filter_pdgID::operator() (ROOT::VecOps::RVec in) get_EventPrimaryVertex::get_EventPrimaryVertex( int arg_genstatus) { m_genstatus = arg_genstatus; }; TVector3 get_EventPrimaryVertex::operator() ( ROOT::VecOps::RVec in ) { TVector3 result(-1e12,-1e12,-1e12); + int i=0; for (auto & p: in) { + i++; if ( p.generatorStatus == m_genstatus ) { // generator status code for the incoming particles of the hardest subprocess TVector3 res( p.vertex.x, p.vertex.y, p.vertex.z ); result = res; @@ -96,6 +99,36 @@ TVector3 get_EventPrimaryVertex::operator() ( ROOT::VecOps::RVec in ) { + TLorentzVector result(-1e12,-1e12,-1e12,-1e12); + Bool_t found_py8 = false; + //std::cout<<"-------------------------------------------"< 1.e-12 ) { // generator status code for the incoming particles of the hardest subprocess + // vertex.time is in s, convert in mm here. 
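// (p.time is in seconds; multiplying by 2.99792458e+8 m/s and by the extra 1.0e3 mm/m stores c*t in mm, consistent with the mm units of the spatial vertex components.)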
+ TLorentzVector res( p.vertex.x, p.vertex.y, p.vertex.z, p.time * 1.0e3 * 2.99792458e+8); + result = res; + break; + } + } + } + //std::cout< get_tree::operator() (ROOT::VecOps::RVec in, ROOT::VecOps::RVec ind){ diff --git a/analyzers/dataframe/src/ReconstructedParticle.cc b/analyzers/dataframe/src/ReconstructedParticle.cc index e6f67062b7..fd34a17f23 100644 --- a/analyzers/dataframe/src/ReconstructedParticle.cc +++ b/analyzers/dataframe/src/ReconstructedParticle.cc @@ -18,6 +18,22 @@ ROOT::VecOps::RVec sel_pt::operator() (ROOT return result; } +sel_eta::sel_eta(float arg_min_eta) : m_min_eta(arg_min_eta) {}; +ROOT::VecOps::RVec sel_eta::operator() (ROOT::VecOps::RVec in) { + ROOT::VecOps::RVec result; + result.reserve(in.size()); + for (size_t i = 0; i < in.size(); ++i) { + auto & p = in[i]; + TLorentzVector tv1; + tv1.SetXYZM(p.momentum.x, p.momentum.y, p.momentum.z, p.mass); + if (abs(tv1.Eta()) < abs(m_min_eta)){ + result.emplace_back(p); + } + } + return result; +} + + sel_p::sel_p(float arg_min_p, float arg_max_p) : m_min_p(arg_min_p), m_max_p(arg_max_p) {}; ROOT::VecOps::RVec sel_p::operator() (ROOT::VecOps::RVec in) { ROOT::VecOps::RVec result; @@ -234,6 +250,16 @@ ROOT::VecOps::RVec get(ROOT::VecOps::RVec in) { + TLorentzVector P4sum; + for (auto & p: in) { + TLorentzVector tlv; + tlv.SetXYZM(p.momentum.x, p.momentum.y, p.momentum.z, p.mass); + P4sum += tlv; + } + return P4sum; + } + ROOT::VecOps::RVec get_mass(ROOT::VecOps::RVec in) { ROOT::VecOps::RVec result; diff --git a/analyzers/dataframe/src/ReconstructedParticle2Track.cc b/analyzers/dataframe/src/ReconstructedParticle2Track.cc index 38de350179..f8aa59e890 100644 --- a/analyzers/dataframe/src/ReconstructedParticle2Track.cc +++ b/analyzers/dataframe/src/ReconstructedParticle2Track.cc @@ -1,14 +1,39 @@ #include "FCCAnalyses/ReconstructedParticle2Track.h" +#include "FCCAnalyses/VertexingUtils.h" namespace FCCAnalyses{ namespace ReconstructedParticle2Track{ + ROOT::VecOps::RVec + getRP2TRK_mom(ROOT::VecOps::RVec in, + ROOT::VecOps::RVec tracks) { + ROOT::VecOps::RVec result; + for (auto & p: in) { + if (p.tracks_begin + getRP2TRK_charge(ROOT::VecOps::RVec in, + ROOT::VecOps::RVec tracks) { + ROOT::VecOps::RVec result; + for (auto & p: in) { + if (p.tracks_begin getRP2TRK_Bz(const ROOT::VecOps::RVec& rps, const ROOT::VecOps::RVec& tracks) { const double c_light = 2.99792458e8; const double a = c_light * 1e3 * 1e-15; //[omega] = 1/mm ROOT::VecOps::RVec out; - + for(auto & p: rps) { if(p.tracks_begin < tracks.size()) { double pt= sqrt(p.momentum.x * p.momentum.x + p.momentum.y * p.momentum.y); @@ -23,8 +48,8 @@ namespace ReconstructedParticle2Track{ float Bz(const ROOT::VecOps::RVec& rps, const ROOT::VecOps::RVec& tracks) { const double c_light = 2.99792458e8;// speed of light m/sec; - const double a = c_light * 1e3 * 1e-15; //[omega] = 1/mm - + const double a = c_light * 1e3 * 1e-15; //[omega] = 1/mm + double Bz = -9; for(auto & p: rps) { @@ -38,38 +63,39 @@ namespace ReconstructedParticle2Track{ ROOT::VecOps::RVec XPtoPar_dxy(const ROOT::VecOps::RVec& in, const ROOT::VecOps::RVec& tracks, - const TVector3& V, + const TLorentzVector& V, // primary vertex const float& Bz) { - - const double cSpeed = 2.99792458e8 * 1.0e-9; - + + const double cSpeed = 2.99792458e8 * 1.0e-9; + ROOT::VecOps::RVec out; for (const auto & rp: in) { - + if( rp.tracks_begin < tracks.size()) { - + float D0_wrt0 = tracks.at(rp.tracks_begin).D0; float Z0_wrt0 = tracks.at(rp.tracks_begin).Z0; float phi0_wrt0 = tracks.at(rp.tracks_begin).phi; TVector3 X( - 
D0_wrt0 * TMath::Sin(phi0_wrt0) , D0_wrt0 * TMath::Cos(phi0_wrt0) , Z0_wrt0); - TVector3 x = X - V; - + TVector3 x = X - V.Vect(); + //std::cout<<"vertex: "< 0) { - double T = TMath::Sqrt(pt * pt - 2 * a * cross + a * a * r2); - if (pt < 10.0) D = (T - pt) / a; - else D = (-2 * cross + a * r2) / (T + pt); + double T = TMath::Sqrt(pt * pt - 2 * a * cross + a * a * r2); + if (pt < 10.0) D = (T - pt) / a; + else D = (-2 * cross + a * r2) / (T + pt); } - out.push_back(D); - + //std::cout<<"displ: "< XPtoPar_dz(const ROOT::VecOps::RVec& in, const ROOT::VecOps::RVec& tracks, - const TVector3& V, + const TLorentzVector& V, // primary vertex const float& Bz) { - const double cSpeed = 2.99792458e8 * 1.0e-9; //Reduced speed of light ??? + const double cSpeed = 2.99792458e8 * 1.0e-9; //Reduced speed of light ??? ROOT::VecOps::RVec out; @@ -97,7 +123,7 @@ namespace ReconstructedParticle2Track{ float phi0_wrt0 = tracks.at(rp.tracks_begin).phi; TVector3 X( - D0_wrt0 * TMath::Sin(phi0_wrt0) , D0_wrt0 * TMath::Cos(phi0_wrt0) , Z0_wrt0); - TVector3 x = X - V; + TVector3 x = X - V.Vect(); TVector3 p(rp.momentum.x, rp.momentum.y, rp.momentum.z); @@ -111,6 +137,7 @@ namespace ReconstructedParticle2Track{ if (pt < 10.0) D = (T - pt) / a; else D = (-2 * cross + a * r2) / (T + pt); double B = C * TMath::Sqrt(TMath::Max(r2 - D * D, 0.0) / (1 + 2 * C * D)); + if ( TMath::Abs(B) > 1.) B = TMath::Sign(1, B); double st = TMath::ASin(B) / C; double ct = p(2) / pt; double z0; @@ -128,10 +155,10 @@ namespace ReconstructedParticle2Track{ ROOT::VecOps::RVec XPtoPar_phi(const ROOT::VecOps::RVec& in, const ROOT::VecOps::RVec& tracks, - const TVector3& V, + const TLorentzVector& V, // primary vertex const float& Bz) { - const double cSpeed = 2.99792458e8 * 1.0e-9; //Reduced speed of light ??? + const double cSpeed = 2.99792458e8 * 1.0e-9; //Reduced speed of light ??? 
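// Regarding the "???" above: 2.99792458e8 * 1.0e-9 = 0.299792458 is simply c expressed in m/ns; numerically it is the familiar factor in pT[GeV] = 0.299792458 * B[T] * R[m], so "reduced" refers only to the rescaled units, not to a different physical constant.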
ROOT::VecOps::RVec out; @@ -144,7 +171,7 @@ namespace ReconstructedParticle2Track{ float phi0_wrt0 = tracks.at(rp.tracks_begin).phi; TVector3 X( - D0_wrt0 * TMath::Sin(phi0_wrt0) , D0_wrt0 * TMath::Cos(phi0_wrt0) , Z0_wrt0); - TVector3 x = X - V; + TVector3 x = X - V.Vect(); TVector3 p(rp.momentum.x, rp.momentum.y, rp.momentum.z); @@ -154,7 +181,7 @@ namespace ReconstructedParticle2Track{ double cross = x(0) * p(1) - x(1) * p(0); double T = TMath::Sqrt(pt * pt - 2 * a * cross + a * a * r2); double phi0 = TMath::ATan2((p(1) - a * x(0)) / T, (p(0) + a * x(1)) / T); - + out.push_back(phi0); } else { @@ -166,7 +193,6 @@ namespace ReconstructedParticle2Track{ ROOT::VecOps::RVec XPtoPar_C(const ROOT::VecOps::RVec& in, const ROOT::VecOps::RVec& tracks, - const TVector3& V, const float& Bz) { const double cSpeed = 2.99792458e8 * 1.0e3 * 1.0e-15; @@ -181,7 +207,7 @@ namespace ReconstructedParticle2Track{ double a = std::copysign(1.0, rp.charge) * Bz * cSpeed; double pt = p.Pt(); double C = a/(2 * pt); - + out.push_back(C); } else { out.push_back(-9.); @@ -192,7 +218,6 @@ namespace ReconstructedParticle2Track{ ROOT::VecOps::RVec XPtoPar_ct(const ROOT::VecOps::RVec& in, const ROOT::VecOps::RVec& tracks, - const TVector3& V, const float& Bz) { const double cSpeed = 2.99792458e8 * 1.0e-9; @@ -204,9 +229,9 @@ namespace ReconstructedParticle2Track{ TVector3 p(rp.momentum.x, rp.momentum.y, rp.momentum.z); double pt = p.Pt(); - + double ct = p(2) / pt; - + out.push_back(ct); } else { @@ -217,11 +242,6 @@ namespace ReconstructedParticle2Track{ } - - - - - ROOT::VecOps::RVec getRP2TRK_D0(ROOT::VecOps::RVec in, ROOT::VecOps::RVec tracks) { @@ -503,11 +523,40 @@ getRP2TRK( ROOT::VecOps::RVec in, return result ; } +// returns reco indices of tracks +ROOT::VecOps::RVec +get_recoindTRK( ROOT::VecOps::RVec in, + ROOT::VecOps::RVec tracks ) +{ + + ROOT::VecOps::RVec result ; + + for (unsigned int ctr=0; ctr= 0 && p.tracks_begin x) { int result = x.size(); return result; } +/// +ROOT::VecOps::RVec +hasTRK( ROOT::VecOps::RVec in ) { + + ROOT::VecOps::RVec result ; + result.reserve( in.size() ); + + for (auto & p: in) { + if (p.tracks_begin >= 0 && p.tracks_begin != p.tracks_end) result.push_back(true) ; + else result.push_back(false); + } + return result ; +} + }//end NS ReconstructedParticle2Track }//end NS FCCAnalyses diff --git a/analyzers/dataframe/src/SmearObjects.cc b/analyzers/dataframe/src/SmearObjects.cc new file mode 100644 index 0000000000..c7017e4d8f --- /dev/null +++ b/analyzers/dataframe/src/SmearObjects.cc @@ -0,0 +1,641 @@ +#include "FCCAnalyses/SmearObjects.h" + +#include "FCCAnalyses/VertexFitterSimple.h" +#include "FCCAnalyses/VertexingUtils.h" +#include "TDecompChol.h" + +#include + +namespace FCCAnalyses { + +namespace SmearObjects { + +// ------------------------------------------------------------------------------------------- + +TVectorD TrackParamFromMC_DelphesConv(edm4hep::MCParticleData aMCParticle) { + + TVector3 p(aMCParticle.momentum.x, aMCParticle.momentum.y, + aMCParticle.momentum.z); + TVector3 x(1e-3 * aMCParticle.vertex.x, 1e-3 * aMCParticle.vertex.y, + 1e-3 * aMCParticle.vertex.z); // mm to m + float Q = aMCParticle.charge; + TVectorD param = VertexingUtils::XPtoPar(x, p, Q); // convention Franco + + return param; +} + +// ------------------------------------------------------------------------------------------- + +SmearedTracks::SmearedTracks(float smear_d0, float smear_phi, float smear_omega, + float smear_z0, float smear_tlambda, + bool debug = false) { + + 
m_smear_parameters[0] = smear_d0; + m_smear_parameters[1] = smear_phi; + m_smear_parameters[2] = smear_omega; + m_smear_parameters[3] = smear_z0; + m_smear_parameters[4] = smear_tlambda; + + m_debug = debug; +} + +ROOT::VecOps::RVec SmearedTracks::operator()( + const ROOT::VecOps::RVec + &allRecoParticles, + const ROOT::VecOps::RVec &alltracks, + const ROOT::VecOps::RVec &RP2MC_indices, + const ROOT::VecOps::RVec &mcParticles) { + + // returns a vector of TrackStates that is parallel to the collection of full + // tracks (alltracks), i.e. same number of entries, same order. The method + // retrieve the MC particle that is associated to a track, and builds a "track + // state" out of the MC particle (i.e. it determines the corresponding (d0, + // phi, omega, z0, tanlambda). From this vector, and from the covariance + // matrix of the track, which is scaled by the user, new track states are + // generated. + + int ntracks = alltracks.size(); + + ROOT::VecOps::RVec result; + result.resize(ntracks); + + edm4hep::TrackState dummy; + + TVectorD zero(5); + for (int k = 0; k < 5; k++) { + zero(k) = 0.; + } + + for (int itrack = 0; itrack < ntracks; itrack++) { + edm4hep::TrackState track = alltracks[itrack]; + edm4hep::TrackState smeared_track = track; + + // find the corresponding MC particle + int MCindex = -1; + for (int ireco = 0; ireco < allRecoParticles.size(); ireco++) { + edm4hep::ReconstructedParticleData rp = allRecoParticles[ireco]; + int track_index = rp.tracks_begin; + if (track_index == itrack) { + MCindex = RP2MC_indices[ireco]; + break; + } + } // end loop on RPs + + if (MCindex < 0 || + MCindex >= + mcParticles + .size()) { // in principle, this should not happen in delphes, + // each track should be matched to a MC particle. + result[itrack] = dummy; + continue; + } + + edm4hep::MCParticleData MCpart = mcParticles[MCindex]; + + // the MC-truth track parameters, in Delphes's comvention + TVectorD mcTrackParam = TrackParamFromMC_DelphesConv(MCpart); + // and in edm4hep convention + TVectorD mcTrackParam_edm4hep = + VertexingUtils::Delphes2Edm4hep_TrackParam(mcTrackParam, false); + + // the covariance matrix of the track, in Delphes's convention + TMatrixDSym Cov = VertexingUtils::get_trackCov(track); + + // if the covMat of the track is pathological (numerical precision issue, + // fraction of tracks = 5e-6): return original track + if (Cov.Determinant() <= 0) { + result[itrack] = smeared_track; + continue; + } + + // scale the covariance matrix + for (int j = 0; j < 5; j++) { + for (int k = 0; k < 5; k++) { + Cov[j][k] = Cov[j][k] * (m_smear_parameters[j] * m_smear_parameters[k]); + } + } + // if (m_debug) { + // Cov.Print(); + //} + + // generate a new track state (in Delphes's convention) + TVectorD smeared_param_delphes = + CovSmear(mcTrackParam, Cov, &m_random, m_debug); + + if (smeared_param_delphes == zero) { // Cholesky decomposition failed + result[itrack] = smeared_track; + continue; + } + + // back to the edm4hep conventions.. 
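// Added sketch (not in the original diff) of the smearing step just above:
// CovSmear() draws the new Delphes-convention parameters around the MC truth,
//   alpha_smeared = alpha_MC + L * r ,   with  Cov_scaled = L * L^T ,
// where r is a vector of five standard-normal random numbers and L comes from
// the (stabilised) Cholesky decomposition of the rescaled covariance matrix.
// Multiplying row and column j of Cov by m_smear_parameters[j] therefore
// rescales the resolution of parameter j by exactly that factor.
// The Delphes-convention result is then converted back to edm4hep below.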
+ TVectorD smeared_param = VertexingUtils::Delphes2Edm4hep_TrackParam( + smeared_param_delphes, false); + + smeared_track.D0 = smeared_param[0]; + smeared_track.phi = smeared_param[1]; + smeared_track.omega = smeared_param[2]; + smeared_track.Z0 = smeared_param[3]; + smeared_track.tanLambda = smeared_param[4]; + + // transform rescaled cov matrix from Delphes convention to EDM4hep + // convention + std::array covMatrix_edm4hep = + VertexingUtils::Delphes2Edm4hep_TrackCovMatrix(Cov, false); + + smeared_track.covMatrix = covMatrix_edm4hep; + + if (m_debug) { + std::cout << std::endl + << "Original track " << track.D0 << " " << track.phi << " " + << track.omega << " " << track.Z0 << " " << track.tanLambda + << std::endl; + std::cout << "Smeared track " << smeared_track.D0 << " " + << smeared_track.phi << " " << smeared_track.omega << " " + << smeared_track.Z0 << " " << smeared_track.tanLambda + << std::endl; + std::cout << "MC particle " << mcTrackParam_edm4hep[0] << " " + << mcTrackParam_edm4hep[1] << " " << mcTrackParam_edm4hep[2] + << " " << mcTrackParam_edm4hep[3] << " " + << mcTrackParam_edm4hep[4] << std::endl; + for (int j = 0; j < 15; j++) + std::cout << "smeared cov matrix(" << j + << "): " << smeared_track.covMatrix[j] << ", scale factor: " + << smeared_track.covMatrix[j] / track.covMatrix[j] + << std::endl; + } + result[itrack] = smeared_track; + + } // end loop on tracks + + return result; +} + +// ------------------------------------------------------------------------------------------- + +// to validate the SmearedTracks method.. : retrieve the TrackStates of the MC +// particles + +ROOT::VecOps::RVec mcTrackParameters( + const ROOT::VecOps::RVec + &allRecoParticles, + const ROOT::VecOps::RVec &alltracks, + const ROOT::VecOps::RVec &RP2MC_indices, + const ROOT::VecOps::RVec &mcParticles) { + + int ntracks = alltracks.size(); + ROOT::VecOps::RVec result; + + edm4hep::TrackState dummy; + + for (int itrack = 0; itrack < ntracks; itrack++) { + edm4hep::TrackState track = alltracks[itrack]; + + // find the corresponding MC particle + int MCindex = -1; + for (int ireco = 0; ireco < allRecoParticles.size(); ireco++) { + edm4hep::ReconstructedParticleData rp = allRecoParticles[ireco]; + int track_index = rp.tracks_begin; + if (track_index == itrack) { + MCindex = RP2MC_indices[ireco]; + break; + } + } // end loop on RPs + + if (MCindex < 0 || MCindex >= mcParticles.size()) { + result.push_back(dummy); + continue; + } + + edm4hep::MCParticleData MCpart = mcParticles[MCindex]; + + // the MC-truth track parameters + edm4hep::TrackState mcTrack; + TVectorD mcTrackParam_delphes = + TrackParamFromMC_DelphesConv(MCpart); // delphes convention, units = m + TVectorD mcTrackParam = VertexingUtils::Delphes2Edm4hep_TrackParam( + mcTrackParam_delphes, false); // edm4hep convention + + mcTrack.D0 = mcTrackParam[0]; + mcTrack.phi = mcTrackParam[1]; + mcTrack.omega = mcTrackParam[2]; + mcTrack.Z0 = mcTrackParam[3]; + mcTrack.tanLambda = mcTrackParam[4]; + + result.push_back(mcTrack); + } + return result; +} + +// -------------------------------------------------------- + +// code from FB + +TVectorD CovSmear(TVectorD x, TMatrixDSym C, TRandom *ran, bool debug = false) { + + // + // Check arrays + // + // Consistency of dimensions + Int_t Nvec = x.GetNrows(); + Int_t Nmat = C.GetNrows(); + if (Nvec != Nmat || Nvec == 0) { + std::cout << "TrkUtil::CovSmear: vector/matrix mismatch. Aborting." 
+ << std::endl; + exit(EXIT_FAILURE); + } + // Positive diagonal elements + for (Int_t i = 0; i < Nvec; i++) { + if (C(i, i) <= 0.0) { + std::cout << "TrkUtil::CovSmear: covariance matrix has negative diagonal " + "elements. Aborting." + << std::endl; + exit(EXIT_FAILURE); + } + } + // + // Do a Choleski decomposition and random number extraction, with appropriate + // stabilization + // + TMatrixDSym CvN = C; + TMatrixDSym DCv(Nvec); + DCv.Zero(); + TMatrixDSym DCvInv(Nvec); + DCvInv.Zero(); + for (Int_t id = 0; id < Nvec; id++) { + Double_t dVal = TMath::Sqrt(C(id, id)); + DCv(id, id) = dVal; + DCvInv(id, id) = 1.0 / dVal; + } + CvN.Similarity(DCvInv); // Normalize diagonal to 1 + TDecompChol Chl(CvN); + Bool_t OK = Chl.Decompose(); // Choleski decomposition of normalized matrix + if (!OK) { + std::cout << "SmearingObjects::CovSmear: covariance matrix is not positive " + "definite. Will use the original track." + << std::endl; + // exit(EXIT_FAILURE); + TVectorD zero(5); + for (int k = 0; k < 5; k++) { + zero(k) = 0.; + } + return zero; + } + TMatrixD U = Chl.GetU(); // Get Upper triangular matrix + TMatrixD Ut(TMatrixD::kTransposed, U); // Transposed of U (lower triangular) + TVectorD r(Nvec); + for (Int_t i = 0; i < Nvec; i++) + r(i) = ran->Gaus(0.0, 1.0); // Array of normal random numbers + if (debug) + std::cout << " random nb " << ran->Gaus(0.0, 1.0) << std::endl; + TVectorD xOut = x + DCv * (Ut * r); // Observed parameter vector + // + return xOut; +} + +// ------------------------------------------------------------------------------------------- + +SmearedTracksdNdx::SmearedTracksdNdx(float scale, bool debug = false) { + + // rescale resolution by this factor + m_scale = scale; + + // debug flag + m_debug = debug; +} + +ROOT::VecOps::RVec SmearedTracksdNdx::operator()( + const ROOT::VecOps::RVec + &allRecoParticles, + const ROOT::VecOps::RVec &dNdx, + const ROOT::VecOps::RVec &length, + const ROOT::VecOps::RVec &RP2MC_indices, + const ROOT::VecOps::RVec &mcParticles) { + + // returns a vector of dNdx that is parallel to the collection of full + // tracks (alltracks), i.e. same number of entries, same order. The method + // retrieve the MC particle that is associated to a track, and builds a "track + // state" out of the MC particle and regenerates a new value of the dNdx + + ROOT::VecOps::RVec result; + edm4hep::Quantity dummy; + + int ntracks = dNdx.size(); + result.resize(ntracks); + + // for dNdx calculation + TVector3 mc_mom; + TrkUtil tu; + + for (int itrack = 0; itrack < ntracks; itrack++) { + edm4hep::Quantity dndx = dNdx[itrack]; + edm4hep::Quantity smeared_dndx = dndx; + + // find the corresponding MC particle + int MCindex = -1; + for (int ireco = 0; ireco < allRecoParticles.size(); ireco++) { + edm4hep::ReconstructedParticleData rp = allRecoParticles[ireco]; + int track_index = rp.tracks_begin; + if (track_index == itrack) { + MCindex = RP2MC_indices[ireco]; + break; + } + } // end loop on RPs + + if (MCindex < 0 || + MCindex >= + mcParticles + .size()) { // in principle, this should not happen in delphes, + // each track should be matched to a MC particle. + result[itrack] = smeared_dndx; + continue; + } + + edm4hep::MCParticleData mc_part = mcParticles[MCindex]; + + // mom and mass evaluated on gen particles + mc_mom.SetXYZ(mc_part.momentum.x, mc_part.momentum.y, mc_part.momentum.z); + + float bg = mc_mom.Mag() / mc_part.mass; // beta * gamma + float muClu = + tu.Nclusters(bg, 0) * length[itrack]; // avg. 
number of clusters + + float Ncl = dndx.value * length[itrack]; + Ncl = std::max(muClu + m_scale * (Ncl - muClu), float(0.)); + + result[itrack].type = 0; + result[itrack].value = Ncl / length[itrack]; + + if (m_debug) { + std::cout << std::endl + << "requested smearing dNdx factor: " << m_scale << std::endl + << "gen part (PID, p): " << mc_part.PDG << " " << mc_mom.Mag() + << std::endl + << "original dNdx: " << dNdx[itrack].value << std::endl; + std::cout << "smeared dNdx : " << result[itrack].value << std::endl; + } + + } // end loop on tracks + + return result; +} + +// ------------------------------------------------------------------------------------------- + +SmearedTracksTOF::SmearedTracksTOF(float scale, bool debug = false) { + + // rescale resolution by this factor + m_scale = scale; + + // debug flag + m_debug = debug; +} + +ROOT::VecOps::RVec SmearedTracksTOF::operator()( + const ROOT::VecOps::RVec + &allRecoParticles, + const ROOT::VecOps::RVec &trackdata, + const ROOT::VecOps::RVec &trackerhits, + const ROOT::VecOps::RVec &length, + const ROOT::VecOps::RVec &RP2MC_indices, + const ROOT::VecOps::RVec &mcParticles) { + // returns a vector of dNdx that is parallel to the collection of full + // tracks (alltracks), i.e. same number of entries, same order. The method + // retrieve the MC particle that is associated to a track, and builds a "track + // state" out of the MC particle and regenerates a new value of the dNdx + + ROOT::VecOps::RVec result; + edm4hep::TrackerHitData dummy; + + int ntracks = length.size(); + int nhits = trackerhits.size(); // 3x size of tracks since 3 hits per track + result.resize(nhits); + + TLorentzVector gen_p4; + + float c_light = 2.99792458e+8; + float mm_to_sec = 1e-03 / c_light; + + edm4hep::TrackerHitData thits_0, thits_1, thits_2; + edm4hep::TrackerHitData smeared_thits_0, smeared_thits_1, smeared_thits_2; + + for (int itrack = 0; itrack < ntracks; itrack++) { + + int idx_tin = trackdata.at(itrack).trackerHits_begin; // at IP + int idx_tpix = + trackdata.at(itrack).trackerHits_begin + 1; // at 1st pixel layer + int idx_tout = trackdata.at(itrack).trackerHits_end - 1; // at calo + + smeared_thits_0 = trackerhits.at(idx_tin); + smeared_thits_1 = trackerhits.at(idx_tpix); + smeared_thits_2 = trackerhits.at(idx_tout); + + // find the corresponding MC particle + int MCindex = -1; + for (int ireco = 0; ireco < allRecoParticles.size(); ireco++) { + edm4hep::ReconstructedParticleData rp = allRecoParticles[ireco]; + int track_index = rp.tracks_begin; + if (track_index == itrack) { + MCindex = RP2MC_indices[ireco]; + break; + } + } // end loop on RPs + + if (MCindex < 0 || + MCindex >= + mcParticles + .size()) { // in principle, this should not happen in delphes, + // each track should be matched to a MC particle. 
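// no valid MC match for this track: keep the original (unsmeared) hits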
+ result[idx_tin] = smeared_thits_0; + result[idx_tpix] = smeared_thits_1; + result[idx_tout] = smeared_thits_2; + continue; + } + + edm4hep::MCParticleData mc_part = mcParticles[MCindex]; + + gen_p4.SetXYZM(mc_part.momentum.x, mc_part.momentum.y, mc_part.momentum.z, + mc_part.mass); + + // everything in second + float mc_tin = mc_part.time; // MC time is already in seconds + float mc_tof = length[itrack] / gen_p4.Beta() * mm_to_sec; + float mc_tout = mc_tin + mc_tof; + float reco_tout = trackerhits.at(idx_tout).time; + float smeared_tout = mc_tout + m_scale * (reco_tout - mc_tout); + + smeared_thits_2.time = smeared_tout; + + result[idx_tin] = smeared_thits_0; + result[idx_tpix] = smeared_thits_1; + result[idx_tout] = smeared_thits_2; + + if (m_debug) { + std::cout << std::endl + << "requested smearing tof factor: " << m_scale << std::endl + << "gen part (PID, p , beta, t_in, L): " << mc_part.PDG << " " + << gen_p4.P() << " " << gen_p4.Beta() << " " << mc_tin * 1e12 + << " " << length[itrack] << std::endl + << "gen t_out (ps): " << mc_tout * 1e12 << std::endl; + std::cout << "reco t_out (ps) : " << reco_tout * 1e12 << std::endl; + std::cout << "smeared t_out (ps) : " << smeared_tout * 1e12 << std::endl; + } + + } // end loop on tracks + + return result; +} + +// ------------------------------------------------------------------------------------------- + +SmearedReconstructedParticle::SmearedReconstructedParticle(float scale, + int type, int mode, + bool debug = false) { + + // rescale resolution by this factor + m_scale = scale; + + // apply rescaling only to particle of given type + // supported: 11 (electrons), 13 (muons), 130 (neutral hadrons), 22 (photon), + // 0 (charged hadrons), -1 (all) + m_type = type; + + // 0: energy, 1: momentum + m_mode = mode; + + // debug flag + m_debug = debug; +} + +ROOT::VecOps::RVec +SmearedReconstructedParticle::operator()( + const ROOT::VecOps::RVec + &allRecoParticles, + const ROOT::VecOps::RVec &RP2MC_indices, + const ROOT::VecOps::RVec &mcParticles) { + + // returns a vector of ReconstructedParticleData + // The method retrieve the MC particle that is associated to the + // ReconstructedParticle, and creates a new ReconstructedParticle with smeared + // parameters. 
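// Sketch of the rescaling applied below (comment added for clarity, not in the
// original diff): with m_scale = s, the residual with respect to the matched MC
// particle is scaled,
//   mode  0 :  E'   = E_gen + s * (E_reco - E_gen)   (|p| recomputed from E' and the reco mass)
//   mode  1 :  |p'| = p_gen + s * (p_reco - p_gen)   (E recomputed from |p'| and the reco mass)
//   mode -1 :  the MC-truth four-momentum is returned
// so s = 0 gives perfect resolution, s = 1 reproduces the nominal detector
// response and s = 2 doubles the resolution; the reco direction (theta, phi) is
// kept in modes 0 and 1.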
+ + int npart = allRecoParticles.size(); + + ROOT::VecOps::RVec result; + result.resize(npart); + + TLorentzVector gen_p4, reco_p4, smeared_p4; + + for (int ipart = 0; ipart < npart; ipart++) { + + edm4hep::ReconstructedParticleData reco_part = allRecoParticles[ipart]; + edm4hep::ReconstructedParticleData smeared_part = reco_part; + + int reco_part_type = abs(reco_part.type); + + // have to manually infer pid of ele/mu from mass because type not stored in + // reco particles + if (abs(reco_part.charge) > 0 and + abs(reco_part.mass - 0.000510999) < 1.e-05) { + reco_part_type = 11; + } else if (abs(reco_part.charge) > 0 and + abs(reco_part.mass - 0.105658) < 1.e-03) { + reco_part_type = 13; + } + + // find the corresponding MC particle + int MCindex = -1; + MCindex = RP2MC_indices[ipart]; + + // smear particle only if MC particle found, else return original particle + // and if type == requested + if (MCindex >= 0 and MCindex < mcParticles.size() and + reco_part_type == m_type) { + edm4hep::MCParticleData mc_part = mcParticles[MCindex]; + + gen_p4.SetXYZM(mc_part.momentum.x, mc_part.momentum.y, mc_part.momentum.z, + mc_part.mass); + reco_p4.SetXYZM(reco_part.momentum.x, reco_part.momentum.y, + reco_part.momentum.z, reco_part.mass); + + float smeared_p = -1; + + if (m_mode == 0) { + // rescale existing smearing of the energy + smeared_part.energy = std::max( + gen_p4.E() + m_scale * (reco_p4.E() - gen_p4.E()), reco_p4.M()); + + // recompute momentum magnitude + smeared_p = std::sqrt(smeared_part.energy * smeared_part.energy - + reco_p4.M() * reco_p4.M()); + + // recompute mom x, y, z using original reco particle direction + smeared_part.momentum.x = + smeared_p * std::sin(reco_p4.Theta()) * std::cos(reco_p4.Phi()); + smeared_part.momentum.y = + smeared_p * std::sin(reco_p4.Theta()) * std::sin(reco_p4.Phi()); + smeared_part.momentum.z = smeared_p * std::cos(reco_p4.Theta()); + + smeared_p4.SetXYZM(smeared_part.momentum.x, smeared_part.momentum.y, + smeared_part.momentum.z, smeared_part.mass); + } + + // momentum resolution mode + else if (m_mode == 1) { + // rescale existing momentum smearing + smeared_p = + std::max(float(gen_p4.P() + m_scale * (reco_p4.P() - gen_p4.P())), + float(0.)); + + // recompute energy + smeared_part.energy = + std::sqrt(smeared_p * smeared_p + reco_p4.M() * reco_p4.M()); + // recompute mom x, y, z using original reco particle direction + smeared_part.momentum.x = + smeared_p * std::sin(reco_p4.Theta()) * std::cos(reco_p4.Phi()); + smeared_part.momentum.y = + smeared_p * std::sin(reco_p4.Theta()) * std::sin(reco_p4.Phi()); + smeared_part.momentum.z = smeared_p * std::cos(reco_p4.Theta()); + + } + + // return mc truth particle + else if (m_mode == -1) { + smeared_part.energy = gen_p4.E(); + smeared_p = gen_p4.P(); + smeared_p4 = gen_p4; + + // recompute mom x, y, z using original reco particle direction + smeared_part.momentum.x = gen_p4.Px(); + smeared_part.momentum.y = gen_p4.Py(); + smeared_part.momentum.z = gen_p4.Pz(); + + // set type + smeared_part.type = mc_part.PDG; + } + + if (m_debug) { + std::cout << std::endl + << "requested smearing energy factor: " << m_scale + << std::endl + << "gen part (PID, p, theta, phi, m): " << mc_part.PDG + << " " << gen_p4.P() << " " << gen_p4.Theta() << " " + << gen_p4.Phi() << " " << gen_p4.M() << std::endl + << "reco part (PID, p, theta, phi, m): " << reco_part_type + << " " << reco_p4.P() << " " << reco_p4.Theta() << " " + << reco_p4.Phi() << " " << reco_p4.M() << std::endl; + std::cout << "smeared part (PID, p, theta, phi, 
m): " + << smeared_part.type << " " << smeared_p4.P() << " " + << smeared_p4.Theta() << " " << smeared_p4.Phi() << " " + << smeared_p4.M() << std::endl; + } + } + + result[ipart] = smeared_part; + + } // end loop on particles + + return result; +} + +} // namespace SmearObjects +} // namespace FCCAnalyses diff --git a/analyzers/dataframe/src/VertexFinderActs.cc b/analyzers/dataframe/src/VertexFinderActs.cc index ad8eac8726..75853a190c 100644 --- a/analyzers/dataframe/src/VertexFinderActs.cc +++ b/analyzers/dataframe/src/VertexFinderActs.cc @@ -68,7 +68,8 @@ VertexFinderAMVF(ROOT::VecOps::RVec tracks ){ // Set up deterministic annealing with user-defined temperatures std::vector temperatures{8.0, 4.0, 2.0, 1.4142136, 1.2247449, 1.0}; - Acts::AnnealingUtility::Config annealingConfig(temperatures); + Acts::AnnealingUtility::Config annealingConfig; + annealingConfig.setOfTemperatures = temperatures; Acts::AnnealingUtility annealingUtility(annealingConfig); @@ -101,7 +102,8 @@ VertexFinderAMVF(ROOT::VecOps::RVec tracks ){ using Finder = Acts::AdaptiveMultiVertexFinder; //using Finder = Acts::AdaptiveMultiVertexFinder; //Finder::Config finderConfig(std::move(fitter), seedFinder, ipEstimator, linearizer); - Finder::Config finderConfig(std::move(fitter), seedFinder, ipEstimator, linearizer, bField); + Finder::Config finderConfig = {std::move(fitter), seedFinder, ipEstimator, + std::move(linearizer), bField}; // We do not want to use a beamspot constraint here finderConfig.useBeamSpotConstraint = false; diff --git a/analyzers/dataframe/src/VertexFinderLCFIPlus.cc b/analyzers/dataframe/src/VertexFinderLCFIPlus.cc new file mode 100644 index 0000000000..d7ca7a2308 --- /dev/null +++ b/analyzers/dataframe/src/VertexFinderLCFIPlus.cc @@ -0,0 +1,1021 @@ +// reference: https://arxiv.org/pdf/1506.08371.pdf +// contact: kunal.gautam@cern.ch + +#include "FCCAnalyses/VertexFinderLCFIPlus.h" +#include + +namespace FCCAnalyses{ + +namespace VertexFinderLCFIPlus{ + +bool debug_me = false; +// if particle masses defined in a dedicated file, call those rather than defining here +const double m_pi = 0.13957039; // pi+- mass [GeV] +const double m_p = 0.93827208; // p+- mass [GeV] +const double m_e = 0.00051099; // e+- mass [GeV] +// + +ROOT::VecOps::RVec> get_SV_jets(ROOT::VecOps::RVec recoparticles, + ROOT::VecOps::RVec thetracks, + VertexingUtils::FCCAnalysesVertex PV, + ROOT::VecOps::RVec isInPrimary, + ROOT::VecOps::RVec jets, + std::vector> jet_consti, + bool V0_rej, + double chi2_cut, double invM_cut, double chi2Tr_cut) { + + // find SVs using LCFI+ (clustering first) + + ROOT::VecOps::RVec> result; + + ROOT::VecOps::RVec np_tracks; + + // retrieve tracks from reco particles & get a vector with their indices in the reco collection + ROOT::VecOps::RVec tracks = ReconstructedParticle2Track::getRP2TRK( recoparticles, thetracks ); + ROOT::VecOps::RVec reco_ind_tracks = ReconstructedParticle2Track::get_recoindTRK( recoparticles, thetracks ); + if(tracks.size() != reco_ind_tracks.size()) std::cout<<"ERROR: reco index vector not the same size as no of tracks"< i_jetconsti = jet_consti[j]; + for (int ctr=0; ctr tracks_fin = V0rejection_tight(np_tracks, PV, V0_rej); + + if(debug_me) { + std::cout< i_result = findSVfromTracks(tracks_fin, thetracks, PV, chi2_cut, invM_cut, chi2Tr_cut); + // + result.push_back(i_result); + + // clean-up + i_result.clear(); + np_tracks.clear(); + tracks_fin.clear(); + } + + if(debug_me) std::cout<<"no more SVs can be reconstructed"< get_SV_event(ROOT::VecOps::RVec recoparticles, + 
ROOT::VecOps::RVec thetracks, + VertexingUtils::FCCAnalysesVertex PV, + ROOT::VecOps::RVec isInPrimary, + bool V0_rej, + double chi2_cut, double invM_cut, double chi2Tr_cut) { + + // find SVs using LCFI+ (w/o clustering) + + if(debug_me) std::cout << "Starting SV finding!" << std::endl; + + ROOT::VecOps::RVec result; + + // retrieve the tracks associated to the recoparticles + ROOT::VecOps::RVec tracks = ReconstructedParticle2Track::getRP2TRK( recoparticles, thetracks ); + + if(debug_me) std::cout<<"tracks extracted from the reco particles"< np_tracks; + for(unsigned int i=0; i tracks_fin = V0rejection_tight(np_tracks, PV, V0_rej); + + if(debug_me) { + std::cout< get_SV_event(ROOT::VecOps::RVec np_tracks, + ROOT::VecOps::RVec thetracks, + VertexingUtils::FCCAnalysesVertex PV, + bool V0_rej, + double chi2_cut, double invM_cut, double chi2Tr_cut) { + + // find SVs from non-primary tracks using LCFI+ (w/o clustering) + // primary - non-primary separation done externally + + ROOT::VecOps::RVec result; + + // V0 rejection (tight) - perform V0 rejection with tight constraints if user chooses + ROOT::VecOps::RVec tracks_fin = V0rejection_tight(np_tracks, PV, V0_rej); + + if(debug_me) { + std::cout< VertexSeed_best(ROOT::VecOps::RVec tracks, + VertexingUtils::FCCAnalysesVertex PV, + double chi2_cut, double invM_cut) { + + // gives indices of the best pair of tracks + + ROOT::VecOps::RVec result; + int isel = 0; + int jsel = 1; + + int nTr = tracks.size(); + // push empty tracks to make a size=2 vector + ROOT::VecOps::RVec tr_pair; + edm4hep::TrackState tr_i, tr_j; + tr_pair.push_back(tr_i); + tr_pair.push_back(tr_j); + VertexingUtils::FCCAnalysesVertex vtx_seed; + double chi2_min = 99; + + for(unsigned int i=0; i isInV0 = isV0(tr_pair, PV, false); + if(isInV0[0] && isInV0[1]) continue; + + vtx_seed = VertexFitterSimple::VertexFitter_Tk(2, tr_pair); + + // Constraints check + bool pass = check_constraints(vtx_seed, tr_pair, PV, true, chi2_cut, invM_cut); + if(!pass) continue; + + // if a pair passes all constraints compare chi2, store lowest chi2 + double chi2_seed = vtx_seed.vertex.chi2; // normalised but nDOF=1 for nTr=2 + if(chi2_seed < chi2_min) { + isel = i; jsel =j; + chi2_min = chi2_seed; + } + } + } + + if(chi2_min != 99){ + result.push_back(isel); + result.push_back(jsel); + } + return result; +} + +ROOT::VecOps::RVec addTrack_best(ROOT::VecOps::RVec tracks, + ROOT::VecOps::RVec vtx_tr, + VertexingUtils::FCCAnalysesVertex PV, + double chi2_cut, double invM_cut, double chi2Tr_cut) { + // adds index of the best track to the (seed) vtx + + ROOT::VecOps::RVec result = vtx_tr; + if(tracks.size() == vtx_tr.size()) return result; + + int isel = -1; + + int nTr = tracks.size(); + ROOT::VecOps::RVec tr_vtx; + VertexingUtils::FCCAnalysesVertex vtx; + double chi2_min = 99; + + // add tracks of the previously formed vtx to a vector + for(int tr : vtx_tr) { + if(debug_me) std::cout << "Track integer: " << tr << std::endl; + if(debug_me) std::cout << "Track value: " << tracks[tr] << std::endl; + tr_vtx.push_back(tracks[tr]); + } + int iTr = tr_vtx.size(); + // add an empty track to increase vector size by 1 + edm4hep::TrackState tr_i; + tr_vtx.push_back(tr_i); + + // find best track to add to the vtx + for(unsigned int i=0; i=0) result.push_back(isel); + return result; +} + +ROOT::VecOps::RVec V0rejection_tight(ROOT::VecOps::RVec tracks, + VertexingUtils::FCCAnalysesVertex PV, + bool V0_rej) { + // perform V0 rejection with tight constraints if user chooses + ROOT::VecOps::RVec result; + if(V0_rej) { + 
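// tight V0 rejection (comment added for clarity): track pairs compatible with a
// Ks, Lambda0 or photon-conversion hypothesis, according to the tight
// constraints_Ks / constraints_Lambda0 / constraints_Gamma windows defined
// further down, are flagged by isV0() and removed from the list of tracks
// handed to the secondary-vertex finder.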
bool tight = true; + ROOT::VecOps::RVec isInV0 = isV0(tracks, PV, tight); + for(unsigned int i=0; i findSVfromTracks(ROOT::VecOps::RVec tracks_fin, + const ROOT::VecOps::RVec& alltracks, + VertexingUtils::FCCAnalysesVertex PV, + double chi2_cut, double invM_cut, double chi2Tr_cut) { + + // find SVs (only if there are 2 or more tracks) + ROOT::VecOps::RVec result; + + while(tracks_fin.size() > 1) { + // find vertex seed + ROOT::VecOps::RVec vtx_seed = VertexSeed_best(tracks_fin, PV, chi2_cut, invM_cut); + + if(debug_me){ + std::cout << "tracks_fin.size(): " << tracks_fin.size() << std::endl; + for(int i=0; i vtx_fin = vtx_seed; + int vtx_fin_size = 0; // to start the loop + while(vtx_fin_size != vtx_fin.size()) { + vtx_fin_size = vtx_fin.size(); + vtx_fin = addTrack_best(tracks_fin, vtx_fin, PV, chi2_cut, invM_cut, chi2Tr_cut); + } + + // fit tracks to SV and remove from tracks_fin + ROOT::VecOps::RVec tr_vtx_fin; + for(int i_tr : vtx_fin){ + tr_vtx_fin.push_back(tracks_fin[i_tr]); + if(debug_me) std::cout << "Pushing back tracks_fin[i_tr]" << std::endl; + } + VertexingUtils::FCCAnalysesVertex sec_vtx = VertexFitterSimple::VertexFitter_Tk(2, tr_vtx_fin, alltracks); // flag 2 for SVs + + // see if we can also get indices in the reco collection (for tracks forming an SV) + //sec_vtx.reco_ind = VertexFitterSimple::get_reco_ind(recoparticles,thetracks); // incorrect + + result.push_back(sec_vtx); + // + ROOT::VecOps::RVec temp = tracks_fin; + tracks_fin.clear(); + for(unsigned int t=0; t tracks, + VertexingUtils::FCCAnalysesVertex PV, + bool seed, + double chi2_cut, double invM_cut, double chi2Tr_cut) { + // if all constraints pass -> true + // if any constraint fails -> false + + bool result = true; + + int nTr = tracks.size(); // no of tracks + + // Constraints + // chi2 < cut (9) + double chi2 = vtx.vertex.chi2; // normalised + double nDOF = 2*nTr - 3; // nDOF + chi2 = chi2 * nDOF; + if(chi2 >= chi2_cut) result = false; + // + // invM < cut (10GeV) + double invM = VertexingUtils::get_invM(vtx); + if(invM >= invM_cut) result = false; + // + // invM < sum of energy + double E_tracks = 0.; + for(edm4hep::TrackState tr_e : tracks) E_tracks += VertexingUtils::get_trackE(tr_e); + if(invM >= E_tracks) result = false; + // + // momenta sum & vtx r on same side + double angle = VertexingUtils::get_PV2vtx_angle(tracks, vtx, PV); + if(angle<0) result = false; + // + if(!seed) { + // chi2_contribution(track) < threshold + ROOT::VecOps::RVec chi2_tr = vtx.reco_chi2; + if(chi2_tr[nTr-1] >= chi2Tr_cut) result = false; // threshold = 5 ok? 
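// Recap of the selection implemented above (comment added for clarity):
//   (1) un-normalised vertex chi2 (chi2_norm * nDOF, with nDOF = 2*nTr - 3) < chi2_cut
//   (2) candidate invariant mass < invM_cut and < the summed track energies
//   (3) the PV -> vertex displacement and the summed track momentum must point
//       to the same side (get_PV2vtx_angle >= 0)
//   (4) only when adding a track to an existing seed (!seed): the chi2
//       contribution of the last track, reco_chi2[nTr-1], must stay below chi2Tr_cut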
+ } + // + return result; +} + +ROOT::VecOps::RVec isV0(ROOT::VecOps::RVec np_tracks, + VertexingUtils::FCCAnalysesVertex PV, + bool tight) { + // V0 rejection + // + // take all non-primary tracks & assign "true" to pairs that form V0 + // if(tight) -> tight constraints + // if(!tight) -> loose constraints + + int nTr = np_tracks.size(); + + ROOT::VecOps::RVec result(nTr, false); + // true -> forms a V0, false -> doesn't form a V0 + if(nTr<2) return result; + + // set constraints (if(tight==true) tight_set) + ROOT::VecOps::RVec isKs = constraints_Ks(tight); + ROOT::VecOps::RVec isLambda0 = constraints_Lambda0(tight); + ROOT::VecOps::RVec isGamma = constraints_Gamma(tight); + + ROOT::VecOps::RVec t_pair; + // push empty tracks to make a size=2 vector + edm4hep::TrackState tr_i, tr_j; + t_pair.push_back(tr_i); + t_pair.push_back(tr_j); + VertexingUtils::FCCAnalysesVertex V0; + // + for(unsigned int i=0; i 0) continue; // don't pair tracks with same charge (same sign curvature = same sign charge) + t_pair[1] = np_tracks[j]; + + ROOT::VecOps::RVec V0_cand = get_V0candidate(V0, t_pair, PV, false); + + // Ks + if(V0_cand[0]>isKs[0] && V0_cand[0]isKs[2] && V0_cand[5]>isKs[3]) { + result[i] = true; + result[j] = true; + break; + } + + // Lambda0 + else if(V0_cand[1]>isLambda0[0] && V0_cand[1]isLambda0[2] && V0_cand[5]>isLambda0[3]) { + result[i] = true; + result[j] = true; + break; + } + else if(V0_cand[2]>isLambda0[0] && V0_cand[2]isLambda0[2] && V0_cand[5]>isLambda0[3]) { + result[i] = true; + result[j] = true; + break; + } + + // photon conversion + else if(V0_cand[3]isGamma[2] && V0_cand[5]>isGamma[3]) { + result[i] = true; + result[j] = true; + break; + } + // + } + } + + return result; +} + + +/////////////////////////// +//** V0 Reconstruction **// +/////////////////////////// + +VertexingUtils::FCCAnalysesV0 get_V0s(ROOT::VecOps::RVec np_tracks, + VertexingUtils::FCCAnalysesVertex PV, + bool tight, + double chi2_cut) { + // V0 reconstruction + // if(tight) -> tight constraints + // if(!tight) -> loose constraints + + VertexingUtils::FCCAnalysesV0 result; + ROOT::VecOps::RVec vtx; // FCCAnalyses vertex object + ROOT::VecOps::RVec pdgAbs; // absolute PDG ID + ROOT::VecOps::RVec invM; // invariant mass + result.vtx = vtx; + result.pdgAbs = pdgAbs; + result.invM = invM; + + VertexingUtils::FCCAnalysesVertex V0_vtx; + + int nTr = np_tracks.size(); + if(nTr<2) return result; + ROOT::VecOps::RVec isInV0(nTr, false); + + // set constraints (if(tight==true) tight_set) + ROOT::VecOps::RVec isKs = constraints_Ks(tight); + ROOT::VecOps::RVec isLambda0 = constraints_Lambda0(tight); + ROOT::VecOps::RVec isGamma = constraints_Gamma(tight); + + ROOT::VecOps::RVec tr_pair; + // push empty tracks to make a size=2 vector + edm4hep::TrackState tr_i, tr_j; + tr_pair.push_back(tr_i); + tr_pair.push_back(tr_j); + // + for(unsigned int i=0; i 0) continue; // don't pair tracks with same charge (same sign curvature = same sign charge) + tr_pair[1] = np_tracks[j]; + + ROOT::VecOps::RVec V0_cand = get_V0candidate(V0_vtx, tr_pair, PV, true, chi2_cut); + if(V0_cand[0] == -1) continue; + + // Ks + if(V0_cand[0]>isKs[0] && V0_cand[0]isKs[2] && V0_cand[5]>isKs[3]) { + if(debug_me) std::cout<<"Found a Ks"<isLambda0[0] && V0_cand[1]isLambda0[2] && V0_cand[5]>isLambda0[3]) { + if(debug_me) std::cout<<"Found a Lambda0"<isLambda0[0] && V0_cand[2]isLambda0[2] && V0_cand[5]>isLambda0[3]) { + if(debug_me) std::cout<<"Found a Lambda0"<isGamma[2] && V0_cand[5]>isGamma[3]) { + if(debug_me) std::cout<<"Found a Photon coversion"< 
np_tracks, + VertexingUtils::FCCAnalysesVertex PV, + double Ks_invM_low, double Ks_invM_high, double Ks_dis, double Ks_cosAng, + double Lambda_invM_low, double Lambda_invM_high, double Lambda_dis, double Lambda_cosAng, + double Gamma_invM_low, double Gamma_invM_high, double Gamma_dis, double Gamma_cosAng, + double chi2_cut) { + // V0 reconstruction + // by default set to the tight set of constraints + + VertexingUtils::FCCAnalysesV0 result; + ROOT::VecOps::RVec vtx; // FCCAnalyses vertex object + ROOT::VecOps::RVec pdgAbs; // absolute PDG ID + ROOT::VecOps::RVec invM; // invariant mass + result.vtx = vtx; + result.pdgAbs = pdgAbs; + result.invM = invM; + + VertexingUtils::FCCAnalysesVertex V0_vtx; + + int nTr = np_tracks.size(); + if(nTr<2) return result; + ROOT::VecOps::RVec isInV0(nTr, false); + + // set constraints (if(tight==true) tight_set) + ROOT::VecOps::RVec isKs = constraints_Ks(Ks_invM_low, Ks_invM_high, Ks_dis, Ks_cosAng); + ROOT::VecOps::RVec isLambda0 = constraints_Lambda0(Lambda_invM_low, Lambda_invM_high, Lambda_dis, Lambda_cosAng); + ROOT::VecOps::RVec isGamma = constraints_Gamma(Gamma_invM_low, Gamma_invM_high, Gamma_dis, Gamma_cosAng); + + ROOT::VecOps::RVec tr_pair; + // push empty tracks to make a size=2 vector + edm4hep::TrackState tr_i, tr_j; + tr_pair.push_back(tr_i); + tr_pair.push_back(tr_j); + // + for(unsigned int i=0; i 0) continue; // don't pair tracks with same charge (same sign curvature = same sign charge) + tr_pair[1] = np_tracks[j]; + + ROOT::VecOps::RVec V0_cand = get_V0candidate(V0_vtx, tr_pair, PV, true, chi2_cut); + if(V0_cand[0] == -1) continue; + + // Ks + if(V0_cand[0]>isKs[0] && V0_cand[0]isKs[2] && V0_cand[5]>isKs[3]) { + if(debug_me) std::cout<<"Found a Ks"<isLambda0[0] && V0_cand[1]isLambda0[2] && V0_cand[5]>isLambda0[3]) { + if(debug_me) std::cout<<"Found a Lambda0"<isLambda0[0] && V0_cand[2]isLambda0[2] && V0_cand[5]>isLambda0[3]) { + if(debug_me) std::cout<<"Found a Lambda0"<isGamma[2] && V0_cand[5]>isGamma[3]) { + if(debug_me) std::cout<<"Found a Photon coversion"< recoparticles, + ROOT::VecOps::RVec thetracks, + ROOT::VecOps::RVec isInPrimary, + ROOT::VecOps::RVec jets, + std::vector> jet_consti, + VertexingUtils::FCCAnalysesVertex PV, + bool tight, + double chi2_cut) { + // V0 reconstruction after jet clustering + // if(tight) -> tight constraints + // if(!tight) -> loose constraints + + VertexingUtils::FCCAnalysesV0 result; + ROOT::VecOps::RVec vtx; // FCCAnalyses vertex object + ROOT::VecOps::RVec pdgAbs; // absolute PDG ID + ROOT::VecOps::RVec invM; // invariant mass + ROOT::VecOps::RVec nSV_jet(jets.size(),0); + result.vtx = vtx; + result.pdgAbs = pdgAbs; + result.invM = invM; + result.nSV_jet = nSV_jet; + + VertexingUtils::FCCAnalysesVertex V0_vtx; + + int n_par = recoparticles.size(); + if(n_par<2) return result; + + // set constraints (if(tight==true) tight_set) + ROOT::VecOps::RVec isKs = constraints_Ks(tight); + ROOT::VecOps::RVec isLambda0 = constraints_Lambda0(tight); + ROOT::VecOps::RVec isGamma = constraints_Gamma(tight); + + ROOT::VecOps::RVec tr_pair; + // push empty tracks to make a size=2 vector + edm4hep::TrackState tr_i, tr_j; + tr_pair.push_back(tr_i); + tr_pair.push_back(tr_j); + + // find V0s inside the jet loop (only from non-primary tracks) + ROOT::VecOps::RVec np_tracks; + + ROOT::VecOps::RVec tracks = ReconstructedParticle2Track::getRP2TRK( recoparticles, thetracks ); + ROOT::VecOps::RVec reco_ind_tracks = ReconstructedParticle2Track::get_recoindTRK( recoparticles, thetracks ); + if(tracks.size() != 
reco_ind_tracks.size()) std::cout<<"ERROR: reco index vector not the same size as no of tracks"< i_jetconsti = jet_consti[j]; + for (int ctr=0; ctr isInV0(nTr, false); + // + for(unsigned int i=0; i 0) continue; // don't pair tracks with same charge (same sign curvature = same sign charge) + tr_pair[1] = np_tracks[j]; + + ROOT::VecOps::RVec V0_cand = get_V0candidate(V0_vtx, tr_pair, PV, true, chi2_cut); + if(V0_cand[0] == -1) continue; + + // Ks + if(V0_cand[0]>isKs[0] && V0_cand[0]isKs[2] && V0_cand[5]>isKs[3]) { + if(debug_me) std::cout<<"Found a Ks"<isLambda0[0] && V0_cand[1]isLambda0[2] && V0_cand[5]>isLambda0[3]) { + if(debug_me) std::cout<<"Found a Lambda0"<isLambda0[0] && V0_cand[2]isLambda0[2] && V0_cand[5]>isLambda0[3]) { + if(debug_me) std::cout<<"Found a Lambda0"<isGamma[2] && V0_cand[5]>isGamma[3]) { + if(debug_me) std::cout<<"Found a Photon coversion"< get_V0candidate(VertexingUtils::FCCAnalysesVertex &V0_vtx, + ROOT::VecOps::RVec tr_pair, + VertexingUtils::FCCAnalysesVertex PV, + bool chi2, + double chi2_cut) +{ + // get invariant mass, distance from PV, and colliniarity variables for all V0 candidates + + // [0] -> invM_Ks [GeV] + // [1] -> invM_Lambda1 [GeV] + // [2] -> invM_Lambda2 [GeV] + // [3] -> invM_Gamma [GeV] + // [4] -> r (distance from PV) [mm] + // [5] -> r.p (colinearity) [r & p - unit vectors] + // skip the candidate with output entries = -1 + + ROOT::VecOps::RVec result(6, -1); + + edm4hep::Vector3f r_PV = PV.vertex.position; // in mm + + V0_vtx = VertexFitterSimple::VertexFitter_Tk(2, tr_pair); + + if(chi2) { + // constraint on chi2: chi2 < cut (9) + double chi2_V0 = V0_vtx.vertex.chi2; // normalised but nDOF=1 + if(chi2_V0 >= chi2_cut) return result; + } + + // invariant masses for V0 candidates + result[0] = VertexingUtils::get_invM_pairs(V0_vtx, m_pi, m_pi); + result[1] = VertexingUtils::get_invM_pairs(V0_vtx, m_pi, m_p); + result[2] = VertexingUtils::get_invM_pairs(V0_vtx, m_p, m_pi); + result[3] = VertexingUtils::get_invM_pairs(V0_vtx, m_e, m_e); + + // V0 candidate distance from PV + edm4hep::Vector3f r_V0 = V0_vtx.vertex.position; // in mm + TVector3 r_V0_PV(r_V0[0] - r_PV[0], r_V0[1] - r_PV[1], r_V0[2] - r_PV[2]); + result[4] = r_V0_PV.Mag(); // in mm + + // angle b/n V0 candidate momentum & PV-V0 displacement vector + result[5] = VertexingUtils::get_PV2V0angle(V0_vtx, PV); + + return result; +} + +// functions to fill constraint thresholds +// tight -> tight constraints +// !tight -> loose constraints +// +// [0] -> invariant mass lower limit [GeV] +// [1] -> invariant mass upper limit [GeV] +// [2] -> distance from PV [mm] +// [3] -> colinearity + +ROOT::VecOps::RVec constraints_Ks(bool tight) { + + ROOT::VecOps::RVec result(4, 0); + + if(tight) { + result[0] = 0.493; + result[1] = 0.503; + result[2] = 0.5; + result[3] = 0.999; + } + + else { + result[0] = 0.488; + result[1] = 0.508; + result[2] = 0.3; + result[3] = 0.999; + } + // + return result; +} + +ROOT::VecOps::RVec constraints_Lambda0(bool tight) { + + ROOT::VecOps::RVec result(4, 0); + + if(tight) { + result[0] = 1.111; + result[1] = 1.121; + result[2] = 0.5; + result[3] = 0.99995; + } + + else { + result[0] = 1.106; + result[1] = 1.126; + result[2] = 0.3; + result[3] = 0.999; + } + // + return result; +} + +ROOT::VecOps::RVec constraints_Gamma(bool tight) { + + ROOT::VecOps::RVec result(4, 0); + + if(tight) { + result[1] = 0.005; + result[2] = 9; + result[3] = 0.99995; + } + + else { + result[1] = 0.01; + result[2] = 9; + result[3] = 0.999; + } + // + return result; +} + +// User set 
constraints +ROOT::VecOps::RVec constraints_Ks(double invM_low, double invM_high, double dis, double cosAng) { + + ROOT::VecOps::RVec result(4, 0); + + result[0] = invM_low; + result[1] = invM_high; + result[2] = dis; + result[3] = cosAng; + // + return result; +} + +ROOT::VecOps::RVec constraints_Lambda0(double invM_low, double invM_high, double dis, double cosAng) { + + ROOT::VecOps::RVec result(4, 0); + + result[0] = invM_low; + result[1] = invM_high; + result[2] = dis; + result[3] = cosAng; + // + return result; +} + +ROOT::VecOps::RVec constraints_Gamma(double invM_low, double invM_high, double dis, double cosAng) { + + ROOT::VecOps::RVec result(4, 0); + + result[0] = invM_low; + result[1] = invM_high; + result[2] = dis; + result[3] = cosAng; + // + return result; +} + +// ROOT::VecOps::RVec> VertexSeed_all(ROOT::VecOps::RVec tracks, +// VertexingUtils::FCCAnalysesVertex PV, +// double chi2_cut, double invM_cut) { + + // // gives indices of the all pairs of tracks which pass the constraints + + // ROOT::VecOps::RVec> result; + // ROOT::VecOps::RVec ij_sel; + + // int nTr = tracks.size(); + // ROOT::VecOps::RVec tr_pair; + // // push empty tracks to make a size=2 vector + // edm4hep::TrackState tr_i; + // edm4hep::TrackState tr_j; + // tr_pair.push_back(tr_i); + // tr_pair.push_back(tr_j); + // VertexingUtils::FCCAnalysesVertex vtx_seed; + +// for(unsigned int i=0; i isInV0 = isV0(tr_pair, PV, false); +// if(isInV0[0] && isInV0[1]) continue; + +// vtx_seed = VertexFitterSimple::VertexFitter_Tk(2, tr_pair); + +// // Constraints +// bool pass = check_constraints(vtx_seed, tr_pair, PV, true, chi2_cut, invM_cut, chi2Tr_cut); +// if(!pass) continue; + +// // if a pair passes all constraints, store indices +// ij_sel.push_back(i); ij_sel.push_back(j); +// result.push_back(ij_sel); +// ij_sel.clear(); +// } +// } + +// return result; +// } + + +// ROOT::VecOps::RVec addTrack_multi(ROOT::VecOps::RVec tracks, +// ROOT::VecOps::RVec vtx_tr, +// VertexingUtils::FCCAnalysesVertex PV, +// double chi2_cut, double invM_cut, double chi2Tr_cut) { +// // adds indices of all tracks passing constraints to the (seed) vtx + +// ROOT::VecOps::RVec result = vtx_tr; +// if(tracks.size() == vtx_tr.size()) return result; + +// int nTr = tracks.size(); +// ROOT::VecOps::RVec tr_vtx; +// VertexingUtils::FCCAnalysesVertex vtx; + +// // tracks from the previously formed vtx +// for(int tr : vtx_tr) { +// tr_vtx.push_back(tracks[tr]); +// } +// int iTr = tr_vtx.size(); + +// // find best track to add to the vtx +// for(unsigned int i=0; i - -#include "TFile.h" -#include "TString.h" - -namespace FCCAnalyses{ - -namespace VertexFitterSimple{ - -TVector3 ParToP(TVectorD Par){ - double fB = 2; // 2 Tesla - - Double_t C = Par(2); - Double_t phi0 = Par(1); - Double_t ct = Par(4); - // - TVector3 Pval; - Double_t pt = fB*0.2998 / TMath::Abs(2 * C); - Pval(0) = pt*TMath::Cos(phi0); - Pval(1) = pt*TMath::Sin(phi0); - Pval(2) = pt*ct; - // - return Pval; -} - - -TVectorD XPtoPar(TVector3 x, TVector3 p, Double_t Q){ - - double fB = 2; // 2 Tesla - - // - TVectorD Par(5); - // Transverse parameters - Double_t a = -Q*fB*0.2998; // Units are Tesla, GeV and meters - Double_t pt = p.Pt(); - Double_t C = a / (2 * pt); // Half curvature - //cout << "ObsTrk::XPtoPar: fB = " << fB << ", a = " << a << ", pt = " << pt << ", C = " << C << endl; - Double_t r2 = x.Perp2(); - Double_t cross = x(0)*p(1) - x(1)*p(0); - Double_t T = TMath::Sqrt(pt*pt - 2 * a*cross + a*a*r2); - Double_t phi0 = TMath::ATan2((p(1) - a*x(0)) / T, (p(0) + a*x(1)) / 
T); // Phi0 - Double_t D; // Impact parameter D - if (pt < 10.0) D = (T - pt) / a; - else D = (-2 * cross + a*r2) / (T + pt); - // - Par(0) = D; // Store D - Par(1) = phi0; // Store phi0 - Par(2) = C; // Store C - //Longitudinal parameters - Double_t B = C*TMath::Sqrt(TMath::Max(r2 - D*D,0.0) / (1 + 2 * C*D)); - Double_t st = TMath::ASin(B) / C; - Double_t ct = p(2) / pt; - Double_t z0 = x(2) - ct*st; - // - Par(3) = z0; // Store z0 - Par(4) = ct; // Store cot(theta) - // - return Par; -} - - -// -//TH1F* hTry; -// -Double_t FastRv(TVectorD p1, TVectorD p2){ - // - // Find radius of intersection between two tracks in the transverse plane - // - // p = (D,phi, C) - // - // Solving matrix - TMatrixDSym H(2); - H(0, 0) = -TMath::Cos(p2(1)); - H(0, 1) = TMath::Cos(p1(1)); - H(1, 0) = -TMath::Sin(p2(1)); - H(1, 1) = TMath::Sin(p1(1)); - Double_t Det = TMath::Sin(p2(1) - p1(1)); - H *= 1.0 / Det; - // - // Convergence parameters - Int_t Ntry = 0; - Int_t NtryMax = 100; - Double_t eps = 1000.; - Double_t epsMin = 1.0e-6; - // - // Vertex finding loop - // - TVectorD cterm(2); - cterm(0) = p1(0); - cterm(1) = p2(0); - TVectorD xv(2); - Double_t R = 1000.; - while (eps > epsMin) - { - xv = H * cterm; - Ntry++; - if (Ntry > NtryMax) - { - std::cout << "FastRv: maximum number of iteration reached" << std::endl; - break; - } - Double_t Rnew = TMath::Sqrt(xv(0) * xv(0) + xv(1) * xv(1)); - eps = Rnew - R; - R = Rnew; - cterm(0) = p1(2) * R * R; - cterm(1) = p2(2) * R * R; - } - // - return R; -} -TMatrixDSym RegInv3(TMatrixDSym &Smat0){ - // - // Regularized inversion of symmetric 3x3 matrix with positive diagonal elements - // - TMatrixDSym Smat = Smat0; - Int_t N = Smat.GetNrows(); - if (N != 3){ - std::cout << "RegInv3 called with matrix size != 3. Abort & return standard inversion." << std::endl; - return Smat.Invert(); - } - TMatrixDSym D(N); D.Zero(); - Bool_t dZero = kTRUE; // No elements less or equal 0 on the diagonal - for (Int_t i = 0; i < N; i++) if (Smat(i, i) <= 0.0)dZero = kFALSE; - if (dZero){ - for (Int_t i = 0; i < N; i++) D(i, i) = 1.0 / TMath::Sqrt(Smat(i, i)); - TMatrixDSym RegMat = Smat.Similarity(D); - TMatrixDSym Q(2); - for (Int_t i = 0; i < 2; i++){ - for (Int_t j = 0; j < 2; j++)Q(i, j) = RegMat(i, j); - } - Double_t Det = 1 - Q(0, 1)*Q(1, 0); - TMatrixDSym H(2); - H = Q; - H(0, 1) = -Q(0, 1); - H(1, 0) = -Q(1, 0); - TVectorD p(2); - p(0) = RegMat(0, 2); - p(1) = RegMat(1, 2); - Double_t pHp = H.Similarity(p); - Double_t h = pHp-Det; - // - TMatrixDSym pp(2); pp.Rank1Update(p); - TMatrixDSym F = (h*H) - pp.Similarity(H); - F *= 1.0 / Det; - TVectorD b = H*p; - TMatrixDSym InvReg(3); - for (Int_t i = 0; i < 2; i++) - { - InvReg(i, 2) = b(i); - InvReg(2, i) = b(i); - for (Int_t j = 0; j < 2; j++) InvReg(i, j) = F(i, j); - } - InvReg(2, 2) = -Det; - // - InvReg *= 1.0 / h; - // - // - return InvReg.Similarity(D); - } - else - { - //std::cout << "RegInv3: found negative elements in diagonal. Return standard inversion." 
<< std::endl; - return Smat.Invert(); - } -} -// -// -// -TMatrixD Fill_A(TVectorD par, Double_t phi){ - // - // Derivative of track 3D position vector with respect to track parameters at constant phase - // - // par = vector of track parameters - // phi = phase - // - TMatrixD A(3, 5); - // - // Decode input arrays - // - Double_t D = par(0); - Double_t p0 = par(1); - Double_t C = par(2); - Double_t z0 = par(3); - Double_t ct = par(4); - // - // Fill derivative matrix dx/d alpha - // D - A(0, 0) = -TMath::Sin(p0); - A(1, 0) = TMath::Cos(p0); - A(2, 0) = 0.0; - // phi0 - A(0, 1) = -D*TMath::Cos(p0) + (TMath::Cos(phi + p0) - TMath::Cos(p0)) / (2 * C); - A(1, 1) = -D*TMath::Sin(p0) + (TMath::Sin(phi + p0) - TMath::Sin(p0)) / (2 * C); - A(2, 1) = 0.0; - // C - A(0, 2) = -(TMath::Sin(phi + p0) - TMath::Sin(p0)) / (2 * C*C); - A(1, 2) = (TMath::Cos(phi + p0) - TMath::Cos(p0)) / (2 * C*C); - A(2, 2) = -ct*phi / (2 * C*C); - // z0 - A(0, 3) = 0.0; - A(1, 3) = 0.0; - A(2, 3) = 1.0; - // ct = lambda - A(0, 4) = 0.0; - A(1, 4) = 0.0; - A(2, 4) = phi / (2 * C); - // - return A; -} - -// -TVectorD Fill_a(TVectorD par, Double_t phi){ - // - // Derivative of track 3D position vector with respect to phase at constant track parameters - // - // par = vector of track parameters - // phi = phase - // - TVectorD a(3); - // - // Decode input arrays - // - Double_t D = par(0); - Double_t p0 = par(1); - Double_t C = par(2); - Double_t z0 = par(3); - Double_t ct = par(4); - // - a(0) = TMath::Cos(phi + p0) / (2 * C); - a(1) = TMath::Sin(phi + p0) / (2 * C); - a(2) = ct / (2 * C); - // - return a; -} -// - -TVectorD Fill_x0(TVectorD par){ - // - // Calculate track 3D position at R = |D| (minimum approach to z-axis) - // - TVectorD x0(3); - // - // Decode input arrays - // - Double_t D = par(0); - Double_t p0 = par(1); - Double_t C = par(2); - Double_t z0 = par(3); - Double_t ct = par(4); - // - x0(0) = -D *TMath::Sin(p0); - x0(1) = D*TMath::Cos(p0); - x0(2) = z0; - // - return x0; -} - -// -TVectorD Fill_x(TVectorD par, Double_t phi){ - // - // Calculate track 3D position for a given phase, phi - // - TVectorD x(3); - // - // Decode input arrays - // - Double_t D = par(0); - Double_t p0 = par(1); - Double_t C = par(2); - Double_t z0 = par(3); - Double_t ct = par(4); - // - TVectorD x0 = Fill_x0(par); - x(0) = x0(0) + (TMath::Sin(phi + p0) - TMath::Sin(p0)) / (2 * C); - x(1) = x0(1) - (TMath::Cos(phi + p0) - TMath::Cos(p0)) / (2 * C); - x(2) = x0(2) + ct*phi / (2 * C); - // - return x; -} - - - -VertexingUtils::FCCAnalysesVertex VertexFitter( int Primary, - ROOT::VecOps::RVec recoparticles, - ROOT::VecOps::RVec thetracks, - bool BeamSpotConstraint, - double bsc_sigmax, double bsc_sigmay, double bsc_sigmaz, - double bsc_x, double bsc_y, double bsc_z ) { - - - - // input = a collection of recoparticles (in case one want to make associations to RecoParticles ?) 
- // and thetracks = the collection of all TrackState in the event - - VertexingUtils::FCCAnalysesVertex thevertex; - - // retrieve the tracks associated to the recoparticles - ROOT::VecOps::RVec tracks = ReconstructedParticle2Track::getRP2TRK( recoparticles, thetracks ); - - // and run the vertex fitter - - //FCCAnalysesVertex thevertex = VertexFitter_Tk( Primary, tracks, thetracks) ; - thevertex = VertexFitter_Tk( Primary, tracks, - BeamSpotConstraint, bsc_sigmax, bsc_sigmay, bsc_sigmaz, bsc_x, bsc_y, bsc_z ); - - //fill the indices of the tracks - ROOT::VecOps::RVec reco_ind; - int Ntr = tracks.size(); - for (auto & p: recoparticles) { - //std::cout << " in VertexFitter: a recoparticle with charge = " << p.charge << std::endl; - if ( p.tracks_begin >=0 && p.tracks_begin tracks, - bool BeamSpotConstraint, - double bsc_sigmax, double bsc_sigmay, double bsc_sigmaz, - double bsc_x, double bsc_y, double bsc_z ) { - - // Units for the beam-spot : mum - // See https://github.com/HEP-FCC/FCCeePhysicsPerformance/tree/master/General#generating-events-under-realistic-fcc-ee-environment-conditions - - // final results : - VertexingUtils::FCCAnalysesVertex TheVertex; - - edm4hep::VertexData result; - ROOT::VecOps::RVec reco_chi2; - ROOT::VecOps::RVec< TVectorD > updated_track_parameters; - ROOT::VecOps::RVec reco_ind; - ROOT::VecOps::RVec final_track_phases; - ROOT::VecOps::RVec< TVector3 > updated_track_momentum_at_vertex; - - TheVertex.vertex = result; - TheVertex.reco_chi2 = reco_chi2; - TheVertex.updated_track_parameters = updated_track_parameters; - TheVertex.reco_ind = reco_ind; - TheVertex.final_track_phases = final_track_phases; - TheVertex.updated_track_momentum_at_vertex = updated_track_momentum_at_vertex; - - - int Ntr = tracks.size(); - TheVertex.ntracks = Ntr; - if ( Ntr <= 1) return TheVertex; // can not reconstruct a vertex with only one track... - - - bool debug = false; - if (debug) std::cout << " enter in VertexFitter_Tk for the Bs decay vertex " << std::endl; - - // if a beam-spot constraint is required : - TMatrixDSym BeamSpotCovI(3); - TVectorD BeamSpotPos(3); - if (BeamSpotConstraint) { // fill in the inverse of the covariance matrix. 
Convert the units into meters - BeamSpotCovI(0,0) = 1./pow( bsc_sigmax * 1e-6, 2) ; // mum to m - BeamSpotCovI(1,1) = 1./pow( bsc_sigmay * 1e-6, 2) ; - BeamSpotCovI(2,2) = 1./pow( bsc_sigmaz * 1e-6, 2) ; - BeamSpotPos(0) = bsc_x * 1e-6; - BeamSpotPos(1) = bsc_y * 1e-6 ; - BeamSpotPos(2) = bsc_z * 1e-6 ; - } - - Double_t *final_chi2 = new Double_t[Ntr]; - Double_t *final_phases = new Double_t[Ntr]; - std::vector< TVectorD > final_delta_alpha ; - TVectorD dummy(5); - for (int i=0; i < Ntr; i++) { - final_delta_alpha.push_back( dummy ); - } - - - // - // Vertex fit (units are meters) - // - // Initial variable definitions - TVectorD x0(3); for (Int_t v = 0; v < 3; v++)x0(v) = 100.; // set to large value - Double_t Chi2 = 0; - // - - TVectorD x(3); - TMatrixDSym covX(3); - - - // Stored quantities - Double_t *fi = new Double_t[Ntr]; // Phases - TVectorD **x0i = new TVectorD*[Ntr]; // Track expansion point - TVectorD **ai = new TVectorD*[Ntr]; // dx/dphi - Double_t *a2i = new Double_t[Ntr]; // a'Wa - TMatrixDSym **Di = new TMatrixDSym*[Ntr]; // W-WBW - TMatrixDSym **Wi = new TMatrixDSym*[Ntr]; // (ACA')^-1 - TMatrixDSym **Winvi = new TMatrixDSym*[Ntr]; // ACA' - TMatrixD **Ai = new TMatrixD*[Ntr]; // A - TMatrixDSym **Covi = new TMatrixDSym*[Ntr]; // Cov matrix of the track parameters - - // - // vertex radius approximation - // Maximum impact parameter - Double_t Rd = 0; - for (Int_t i = 0; i < Ntr; i++) - { - //ObsTrk* t = tracks[i]; - //TVectorD par = t->GetObsPar(); - edm4hep::TrackState t = tracks[i] ; - TVectorD par = VertexingUtils::get_trackParam( t ) ; - Double_t Dabs = TMath::Abs(par(0)); - if (Dabs > Rd)Rd = Dabs; - } - // - // Find track pair with largest phi difference - Int_t isel; Int_t jsel; // selected track indices - Double_t dphiMax = -9999.; // Max phi difference - for (Int_t i = 0; i < Ntr-1; i++) - { - //ObsTrk* ti = tracks[i]; - //TVectorD pari = ti->GetObsPar(); - edm4hep::TrackState ti = tracks[i] ; - TVectorD pari = VertexingUtils::get_trackParam( ti ); - Double_t phi1 = pari(1); - - for (Int_t j = i+1; j < Ntr; j++) - { - //ObsTrk* tj = tracks[j]; - //TVectorD parj = tj->GetObsPar(); - edm4hep::TrackState tj = tracks[j]; - TVectorD parj = VertexingUtils::get_trackParam( tj ); - Double_t phi2 = parj(1); - Double_t dphi = TMath::Abs(phi2 - phi1); - if (dphi > TMath::Pi())dphi = TMath::TwoPi() - dphi; - if (dphi > dphiMax) - { - isel = i; jsel = j; - dphiMax = dphi; - } - } - } - // - // - //ObsTrk* t1 = tracks[isel]; - //TVectorD p1 = t1->GetObsPar(); - edm4hep::TrackState t1 = tracks[isel]; - TVectorD p1 = VertexingUtils::get_trackParam( t1 ); - //ObsTrk* t2 = tracks[jsel]; - //TVectorD p2 = t2->GetObsPar(); - edm4hep::TrackState t2 = tracks[jsel]; - TVectorD p2 = VertexingUtils::get_trackParam( t2 ); - Double_t R = FastRv(p1, p2); - if (R > 1.0) R = Rd; - R = 0.5 * (R + Rd); - // - // Iteration properties - // - Int_t Ntry = 0; - Int_t TryMax = 100; - if (BeamSpotConstraint) TryMax = TryMax * 5; - Double_t eps = 1.0e-9; // vertex stability - Double_t epsi = 1000.; - // - while (epsi > eps && Ntry < TryMax) // Iterate until found vertex is stable - { - x.Zero(); - TVectorD cterm(3); TMatrixDSym H(3); TMatrixDSym DW1D(3); - covX.Zero(); // Reset vertex covariance - cterm.Zero(); // Reset constant term - H.Zero(); // Reset H matrix - DW1D.Zero(); - // - for (Int_t i = 0; i < Ntr; i++) - { - // Get track helix parameters and their covariance matrix - //ObsTrk *t = tracks[i]; - //TVectorD par = t->GetObsPar(); - //TMatrixDSym Cov = t->GetCov(); - edm4hep::TrackState t = 
tracks[i] ; - TVectorD par = VertexingUtils::get_trackParam( t ) ; - TMatrixDSym Cov = VertexingUtils::get_trackCov( t ); - Covi[i] = new TMatrixDSym(Cov); // Store matrix - Double_t fs; - if (Ntry <= 0) // Initialize all phases on first pass - { - Double_t D = par(0); - Double_t C = par(2); - Double_t arg = TMath::Max(1.0e-6, (R*R - D*D) / (1 + 2 * C*D)); - fs = 2 * TMath::ASin(C*TMath::Sqrt(arg)); - fi[i] = fs; - } - // - // Starting values - // - fs = fi[i]; // Get phase - TVectorD xs = Fill_x(par, fs); - x0i[i] = new TVectorD(xs); // Start helix position - // W matrix = (A*C*A')^-1; W^-1 = A*C*A' - TMatrixD A = Fill_A(par, fs); // A = dx/da = derivatives wrt track parameters - Ai[i] = new TMatrixD(A); // Store matrix - TMatrixDSym Winv = Cov.Similarity(A); // W^-1 = A*C*A' - Winvi[i] = new TMatrixDSym(Winv); // Store W^-1 matrix - TMatrixDSym W = RegInv3(Winv); // W = (A*C*A')^-1 - Wi[i] = new TMatrixDSym(W); // Store W matrix - TVectorD a = Fill_a(par, fs); // a = dx/ds = derivatives wrt phase - ai[i] = new TVectorD(a); // Store a - Double_t a2 = W.Similarity(a); - a2i[i] = a2; // Store a2 - // Build D matrix - TMatrixDSym B(3); - B.Rank1Update(a, 1.0); - B *= -1. / a2; - B.Similarity(W); - TMatrixDSym Ds = W+B; // D matrix - Di[i] = new TMatrixDSym(Ds); // Store D matrix - TMatrixDSym DsW1Ds = Winv.Similarity(Ds); // Service matrix to calculate covX - DW1D += DsW1Ds; - // Update hessian - H += Ds; - // update constant term - cterm += Ds * xs; - } // End loop on tracks - // - - TMatrixDSym H0 = H; - - if (BeamSpotConstraint) { - H += BeamSpotCovI ; - cterm += BeamSpotCovI * BeamSpotPos ; - DW1D += BeamSpotCovI ; - } - - // update vertex position - TMatrixDSym H1 = RegInv3(H); - x = H1*cterm; - - // Update vertex covariance - covX = DW1D.Similarity(H1); - - // Update phases and chi^2 - Chi2 = 0.0; - for (Int_t i = 0; i < Ntr; i++) - { - TVectorD lambda = (*Di[i])*(*x0i[i] - x); - TMatrixDSym Wm1 = *Winvi[i]; - Double_t addChi2 = Wm1.Similarity(lambda);; - //Chi2 += Wm1.Similarity(lambda); - Chi2 += addChi2; - final_chi2[i] = addChi2; - TVectorD a = *ai[i]; - TVectorD b = (*Wi[i])*(x - *x0i[i]); - for (Int_t j = 0; j < 3; j++)fi[i] += a(j)*b(j) / a2i[i]; - final_phases[i] = fi[i]; - - TMatrixD ta(TMatrixD::kTransposed, *Ai[i]); - TMatrixDSym kk(5); - kk = *Covi[i]; - final_delta_alpha[i] = kk * ta * lambda; // that's minus delta_alpha - } - // - - TVectorD dx = x - x0; - x0 = x; - // update vertex stability - TMatrixDSym Hess = RegInv3(covX); - - epsi = Hess.Similarity(dx); - Ntry++; - //if ( Ntry >= TryMax) std::cout << " ... 
in VertexFitterSimple, Ntry >= TryMax " << std::endl; - - if (BeamSpotConstraint) { - - // add the following term to the chi2 : - TVectorD dx_beamspot = x - BeamSpotPos ; - Double_t chi2_bsc = BeamSpotCovI.Similarity( dx_beamspot ); - //Chi2 += chi2_bsc -3; - Chi2 += chi2_bsc ; - - } - - - - // - // Cleanup - // - for (Int_t i = 0; i < Ntr; i++) - { - x0i[i]->Clear(); - Winvi[i]->Clear(); - Wi[i]->Clear(); - ai[i]->Clear(); - Di[i]->Clear(); - Ai[i]->Clear(); - Covi[i]->Clear(); - - delete x0i[i]; - delete Winvi[i]; - delete Wi[i]; - delete ai[i]; - delete Di[i]; - delete Ai[i]; - delete Covi[i]; - } - } - // - delete[] fi; // Phases - delete[] x0i; // Track expansion point - delete[] ai; // dx/dphi - delete[] a2i; // a'Wa - delete[] Di; // W-WBW - delete[] Wi; // (ACA')^-1 - delete[] Winvi; // ACA' - delete[] Ai ; // A - delete[] Covi; // Cov - - // - //return Chi2; - - // store the results in an edm4hep::VertexData object - // go back from meters to millimeters for the units - float conv = 1e3; - std::array covMatrix; // covMat in edm4hep is a LOWER-triangle matrix. - covMatrix[0] = covX(0,0) * pow(conv,2); - covMatrix[1] = covX(1,0) * pow(conv,2); - covMatrix[2] = covX(1,1) * pow(conv,2); - covMatrix[3] = covX(2,0) * pow(conv,2); - covMatrix[4] = covX(2,1) * pow(conv,2); - covMatrix[5] = covX(2,2) * pow(conv,2); - - float Ndof = 2.0 * Ntr - 3.0; ; - - result.primary = Primary; - result.chi2 = Chi2 /Ndof ; // I store the normalised chi2 here - result.position = edm4hep::Vector3f( x(0)*conv, x(1)*conv, x(2)*conv ) ; // store the vertex in mm - result.covMatrix = covMatrix; - result.algorithmType = 1; - - // Need to fill the associations ... - - double scale0 = 1e-3; //convert mm to m - double scale1 = 1; - double scale2 = 0.5*1e3; // C = rho/2, convert from mm-1 to m-1 - double scale3 = 1e-3 ; //convert mm to m - double scale4 = 1.; - - scale2 = -scale2 ; // sign of omega (sign convention) - - for (Int_t i = 0; i < Ntr; i++) { - - edm4hep::TrackState t = tracks[i] ; - TVectorD par = VertexingUtils::get_trackParam( t ) ; - - // initial momentum : - //TVector3 ptrack_ini = ParToP( par ); - //std::cout << "----- Track # " << i << " initial track momentum : " << std::endl; - //ptrack_ini.Print(); - - // uncomment below to get the post-fit track parameters : - par -= final_delta_alpha[i] ; - - //std::cout << " Track i = " << i << " --- delta_alpha : " << std::endl; - //final_delta_alpha[i].Print(); - - // ( px, py, pz) of the track - TVector3 ptrack = ParToP( par ); - //std::cout << " updates track param :" << std::endl; - //ptrack.Print(); - - // and (px, py) at the vertex instead of the dca : - double phi0 = par(1); - double phi = final_phases[i] ; - double px_at_vertex = ptrack.Pt() * TMath::Cos( phi0 + phi ); - double py_at_vertex = ptrack.Pt() * TMath::Sin( phi0 + phi ); - TVector3 ptrack_at_vertex( px_at_vertex, py_at_vertex, ptrack.Pz() ); - //std::cout << " momentum at the vertex : " << std::endl; - //std::cout << " phi0 at dca = " << phi0 << " phi at vertex = " << phi0+phi << " C = " << par(2) << " phase " << phi << std::endl; - //ptrack_at_vertex.Print(); - - updated_track_momentum_at_vertex.push_back( ptrack_at_vertex ); - - // back to EDM4HEP units... 
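// Added note (not in the original diff): dividing by the scales defined above
// maps the internal fit representation (lengths in meters, half-curvature
// C = rho/2 in 1/m, opposite sign convention) back to the EDM4HEP TrackState
// convention (d0, z0 in mm, omega in 1/mm), i.e. schematically
//   d0[mm] = d0[m] / 1e-3 ,  omega[1/mm] = C[1/m] / (-0.5e3) ,  phi and tanLambda unchanged.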
- par[0] = par[0] / scale0 ; - par[1] = par[1] / scale1 ; - par[2] = par[2] / scale2 ; - par[3] = par[3] / scale3 ; - par[4] = par[4] / scale4 ; - updated_track_parameters.push_back( par ); - - reco_chi2.push_back( final_chi2[i] ); - final_track_phases.push_back( final_phases[i] ); - - } - - TheVertex.vertex = result; - TheVertex.reco_chi2 = reco_chi2; - TheVertex.reco_ind = reco_ind; - TheVertex.updated_track_parameters = updated_track_parameters ; - TheVertex.updated_track_momentum_at_vertex = updated_track_momentum_at_vertex; - TheVertex.final_track_phases = final_track_phases; - - //std::cout << " end of VertexFitter " << std::endl; - /* - for ( Int_t i = 0; i < Ntr; i++) { - std::cout << " Track #" << i << " chi2 = " << reco_chi2[i] << std::endl; - std::cout << " Initial parameters: " << std::endl; - VertexingUtils::get_trackParam( tracks[i] ).Print(); - std::cout << " Updated parameters : " << std::endl; - updated_track_parameters[i].Print(); - } - */ - - delete[] final_chi2; - delete[] final_phases; - - return TheVertex; -} - - -//////////////////////////////////////////////////// - - - -ROOT::VecOps::RVec get_PrimaryTracks( VertexingUtils::FCCAnalysesVertex initialVertex, - ROOT::VecOps::RVec tracks, - bool BeamSpotConstraint, - double bsc_sigmax, double bsc_sigmay, double bsc_sigmaz, - double bsc_x, double bsc_y, double bsc_z, - int ipass ) { - - -// iterative procedure to determine the primary vertex - and the primary tracks -// Start from a vertex reconstructed from all tracks, remove the one with the highest chi2, fit again etc - -// tracks = the collection of tracks that was used in the first step - -//bool debug = true ; - bool debug = false; -float CHI2MAX = 25 ; - -if (debug) { - if (ipass == 0) std::cout << " \n --------------------------------------------------------\n" << std::endl; - std::cout << " ... 
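The five divisions above undo the conversion applied when the tracks were read in: d0 and z0 go between mm (EDM4HEP) and metres (internal), phi0 and tan(lambda) are unchanged, and omega (mm^-1) maps onto the half-curvature C = -omega/2 expressed in m^-1. A round-trip sketch of exactly those scale factors (function names are illustrative):

#include "TVectorD.h"

// EDM4HEP (d0 [mm], phi0, omega [1/mm], z0 [mm], tan(lambda))
// <-> internal (d0 [m], phi0, C [1/m], z0 [m], tan(lambda)), with C = -omega/2.
TVectorD edm4hepToInternal(const TVectorD &p) {
  TVectorD out(5);
  out[0] = p[0] * 1e-3;    // mm -> m
  out[1] = p[1];           // phi0 unchanged
  out[2] = p[2] * (-500.); // omega [1/mm] -> C [1/m], opposite sign convention
  out[3] = p[3] * 1e-3;    // mm -> m
  out[4] = p[4];           // tan(lambda) unchanged
  return out;
}

TVectorD internalToEdm4hep(const TVectorD &p) {
  TVectorD out(5);
  out[0] = p[0] * 1e3;
  out[1] = p[1];
  out[2] = p[2] / (-500.);
  out[3] = p[3] * 1e3;
  out[4] = p[4];
  return out;
}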
enter in get_PrimaryTracks ipass = " << ipass << std::endl; - if (ipass == 0) std::cout << " initial number of tracks = " << tracks.size() << std::endl; -} - -ROOT::VecOps::RVec seltracks = tracks; -ROOT::VecOps::RVec reco_chi2 = initialVertex.reco_chi2; - -if ( seltracks.size() <= 1 ) return seltracks; - -int isPrimaryVertex = initialVertex.vertex.primary ; - -int maxElementIndex = std::max_element(reco_chi2.begin(),reco_chi2.end()) - reco_chi2.begin(); -auto minmax = std::minmax_element(reco_chi2.begin(), reco_chi2.end()); -float chi2max = *minmax.second ; - -if ( chi2max < CHI2MAX ) { - if (debug) { - std::cout << " --- DONE, all tracks have chi2 < CHI2MAX " << std::endl; - std::cout << " number of primary tracks selected = " << seltracks.size() << std::endl; - - } - return seltracks ; -} - - if (debug) { - std::cout << " remove a track that has chi2 = " << chi2max << std::endl; - } - -seltracks.erase( seltracks.begin() + maxElementIndex ); -ipass ++; - - VertexingUtils::FCCAnalysesVertex vtx = VertexFitter_Tk( isPrimaryVertex, - seltracks, - BeamSpotConstraint, - bsc_sigmax, bsc_sigmay, bsc_sigmaz, - bsc_x, bsc_y, bsc_z ) ; - - return get_PrimaryTracks( vtx, seltracks, BeamSpotConstraint, bsc_sigmax, bsc_sigmay, bsc_sigmaz, - bsc_x, bsc_y, bsc_z, ipass ) ; - - - -} - - -ROOT::VecOps::RVec get_NonPrimaryTracks( ROOT::VecOps::RVec allTracks, - ROOT::VecOps::RVec primaryTracks ) { - - ROOT::VecOps::RVec result; - for (auto & track: allTracks) { - bool isInPrimary = false; - for ( auto & primary: primaryTracks) { - if ( track.D0 == primary.D0 && track.Z0 == primary.Z0 && track.phi == primary.phi && track.omega == primary.omega && track.tanLambda == primary.tanLambda ) { - isInPrimary = true; - break; - } - } - if ( !isInPrimary) result.push_back( track ); - } - - return result; -} - - -ROOT::VecOps::RVec IsPrimary_forTracks( ROOT::VecOps::RVec allTracks, - ROOT::VecOps::RVec primaryTracks ) { - - ROOT::VecOps::RVec result; - for (auto & track: allTracks) { - bool isInPrimary = false; - for ( auto & primary: primaryTracks) { - if ( track.D0 == primary.D0 && track.Z0 == primary.Z0 && track.phi == primary.phi && track.omega == primary.omega && track.tanLambda == primary.tanLambda ) { - isInPrimary = true; - break; - } - } - result.push_back( isInPrimary ); - } - return result; -} - -}//end NS VertexFitterSimple - -}//end NS FCCAnalyses +#include "FCCAnalyses/VertexFitterSimple.h" +#include "FCCAnalyses/MCParticle.h" + +#include + +#include "TFile.h" +#include "TString.h" + +//#include "TrkUtil.h" // from delphes + +namespace FCCAnalyses { + +namespace VertexFitterSimple { + +// ----------------------------------------------------------------------------- + +VertexingUtils::FCCAnalysesVertex VertexFitter( + int Primary, + ROOT::VecOps::RVec recoparticles, + ROOT::VecOps::RVec thetracks, bool BeamSpotConstraint, + double bsc_sigmax, double bsc_sigmay, double bsc_sigmaz, double bsc_x, + double bsc_y, double bsc_z) { + + // input = a collection of recoparticles (in case one want to make + // associations to RecoParticles ?) 
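The selection above is a trimming loop: fit, find the track with the largest chi2 contribution and, if it exceeds CHI2MAX, drop it and refit, until every remaining track is compatible with the vertex. A language-level sketch of that pattern; the callable standing in for the vertex fit and the function name are assumptions, only the chi2 cut of 25 comes from the code above.

#include <algorithm>
#include <functional>
#include <vector>

// Keep refitting and dropping the worst track until all per-track chi2
// values are below the cut (or fewer than two tracks remain).
std::vector<int> selectPrimaries(
    std::vector<int> tracks,
    const std::function<std::vector<double>(const std::vector<int> &)> &fitChi2PerTrack,
    double chi2Max = 25.) {
  while (tracks.size() > 1) {
    std::vector<double> chi2 = fitChi2PerTrack(tracks);
    auto worst = std::max_element(chi2.begin(), chi2.end());
    if (*worst < chi2Max)
      break; // every remaining track is compatible with the vertex
    tracks.erase(tracks.begin() + (worst - chi2.begin()));
  }
  return tracks;
}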
and thetracks = the collection of all + // TrackState in the event + + VertexingUtils::FCCAnalysesVertex thevertex; + + // retrieve the tracks associated to the recoparticles + ROOT::VecOps::RVec tracks = + ReconstructedParticle2Track::getRP2TRK(recoparticles, thetracks); + + // and run the vertex fitter + + // FCCAnalysesVertex thevertex = VertexFitter_Tk( Primary, tracks, thetracks) + // ; + thevertex = + VertexFitter_Tk(Primary, tracks, thetracks, BeamSpotConstraint, + bsc_sigmax, bsc_sigmay, bsc_sigmaz, bsc_x, bsc_y, bsc_z); + + // fill the indices of the tracks + ROOT::VecOps::RVec reco_ind; + int Ntr = tracks.size(); + for (auto &p : recoparticles) { + // std::cout << " in VertexFitter: a recoparticle with charge = " << + // p.charge << std::endl; + if (p.tracks_begin >= 0 && p.tracks_begin < thetracks.size()) { + reco_ind.push_back(p.tracks_begin); + } + } + if (reco_ind.size() != Ntr) + std::cout << " ... problem in Vertex, size of reco_ind != Ntr " + << std::endl; + + thevertex.reco_ind = reco_ind; + + return thevertex; +} + +// --------------------------------------------------------------------------------------------------------------------------- + +VertexingUtils::FCCAnalysesVertex +VertexFitter_Tk(int Primary, ROOT::VecOps::RVec tracks, + bool BeamSpotConstraint, double bsc_sigmax, double bsc_sigmay, + double bsc_sigmaz, double bsc_x, double bsc_y, double bsc_z) { + + ROOT::VecOps::RVec dummy; + return VertexFitter_Tk(Primary, tracks, dummy, BeamSpotConstraint, bsc_sigmax, + bsc_sigmay, bsc_sigmaz, bsc_x, bsc_y, bsc_z); +} + +// --------------------------------------------------------------------------------------------------------------------------- + +VertexingUtils::FCCAnalysesVertex +VertexFitter_Tk(int Primary, ROOT::VecOps::RVec tracks, + const ROOT::VecOps::RVec &alltracks, + bool BeamSpotConstraint, double bsc_sigmax, double bsc_sigmay, + double bsc_sigmaz, double bsc_x, double bsc_y, double bsc_z) { + + // Units for the beam-spot : mum + // See + // https://github.com/HEP-FCC/FCCeePhysicsPerformance/tree/master/General#generating-events-under-realistic-fcc-ee-environment-conditions + + // final results : + VertexingUtils::FCCAnalysesVertex TheVertex; + + edm4hep::VertexData result; + ROOT::VecOps::RVec reco_chi2; + ROOT::VecOps::RVec updated_track_parameters; + ROOT::VecOps::RVec reco_ind; + ROOT::VecOps::RVec final_track_phases; + ROOT::VecOps::RVec updated_track_momentum_at_vertex; + + // if the collection of all tracks has been passed, keep trace of the indices + // of the tracks that are used to fit this vertex + if (alltracks.size() > 0) { + for (int i = 0; i < tracks.size(); i++) { // the fitted tracks + edm4hep::TrackState tr1 = tracks[i]; + for (int j = 0; j < alltracks.size(); + j++) { // the collection of all tracks + edm4hep::TrackState tr2 = alltracks[j]; + if (VertexingUtils::compare_Tracks(tr1, tr2)) { + reco_ind.push_back(j); + break; + } + } + } + } + + TheVertex.vertex = result; + TheVertex.reco_chi2 = reco_chi2; + TheVertex.updated_track_parameters = updated_track_parameters; + TheVertex.reco_ind = reco_ind; + TheVertex.final_track_phases = final_track_phases; + TheVertex.updated_track_momentum_at_vertex = updated_track_momentum_at_vertex; + + int Ntr = tracks.size(); + TheVertex.ntracks = Ntr; + if (Ntr <= 1) + return TheVertex; // can not reconstruct a vertex with only one track... 
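For orientation, the wrapper above is normally fed a pre-selected set of reconstructed particles together with the full TrackState collection, along the lines of the sketch below; the selTracks cuts and the beam-spot numbers are illustrative placeholders, not recommended values.

#include "FCCAnalyses/VertexFitterSimple.h"
#include "FCCAnalyses/VertexingUtils.h"

using namespace FCCAnalyses;

// Primary-vertex fit from impact-parameter-selected particles.
VertexingUtils::FCCAnalysesVertex fitPrimaryVertex(
    ROOT::VecOps::RVec<edm4hep::ReconstructedParticleData> recop,
    ROOT::VecOps::RVec<edm4hep::TrackState> alltracks) {
  // keep particles whose track has d0 and z0 significance below 3 (placeholder cuts)
  auto sel = VertexingUtils::selTracks(0., 3., 0., 3.)(recop, alltracks);
  // fit with a beam-spot constraint; widths and position in micrometres (placeholders)
  return VertexFitterSimple::VertexFitter(/* Primary = */ 1, sel, alltracks,
                                          true,           // BeamSpotConstraint
                                          10., 0.1, 500., // bsc_sigma x, y, z
                                          0., 0., 0.);    // bsc_x, y, z
}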
+ + TVectorD **trkPar = new TVectorD *[Ntr]; + TMatrixDSym **trkCov = new TMatrixDSym *[Ntr]; + + bool Units_mm = true; + + for (Int_t i = 0; i < Ntr; i++) { + edm4hep::TrackState t = tracks[i]; + TVectorD par = VertexingUtils::get_trackParam(t, Units_mm); + trkPar[i] = new TVectorD(par); + TMatrixDSym Cov = VertexingUtils::get_trackCov(t, Units_mm); + trkCov[i] = new TMatrixDSym(Cov); + } + + VertexFit theVertexFit(Ntr, trkPar, trkCov); + + if (BeamSpotConstraint) { + float conv_BSC = 1e-3; // convert mum to mm + TVectorD xv_BS(3); + xv_BS[0] = bsc_x * conv_BSC; + xv_BS[1] = bsc_y * conv_BSC; + xv_BS[2] = bsc_z * conv_BSC; + TMatrixDSym cov_BS(3); + cov_BS[0][0] = pow(bsc_sigmax * conv_BSC, 2); + cov_BS[1][1] = pow(bsc_sigmay * conv_BSC, 2); + cov_BS[2][2] = pow(bsc_sigmaz * conv_BSC, 2); + theVertexFit.AddVtxConstraint(xv_BS, cov_BS); + } + + TVectorD x = theVertexFit.GetVtx(); // this actually runs the fit + + result.position = + edm4hep::Vector3f(x(0), x(1), x(2)); // vertex position in mm + + // store the results in an edm4hep::VertexData object + + float Chi2 = theVertexFit.GetVtxChi2(); + float Ndof = 2.0 * Ntr - 3.0; + ; + result.chi2 = Chi2 / Ndof; + + // the chi2 of all the tracks : + TVectorD tracks_chi2 = theVertexFit.GetVtxChi2List(); + for (int it = 0; it < Ntr; it++) { + reco_chi2.push_back(tracks_chi2[it]); + } + + // std::cout << " Fitted vertex: " << x(0)*conv << " " << x(1)*conv << " " << + // x(2)*conv << std::endl; + TMatrixDSym covX = theVertexFit.GetVtxCov(); + std::array + covMatrix; // covMat in edm4hep is a LOWER-triangle matrix. + covMatrix[0] = covX(0, 0); + covMatrix[1] = covX(1, 0); + covMatrix[2] = covX(1, 1); + covMatrix[3] = covX(2, 0); + covMatrix[4] = covX(2, 1); + covMatrix[5] = covX(2, 2); + result.covMatrix = covMatrix; + + result.algorithmType = 1; + + result.primary = Primary; + + TheVertex.vertex = result; + + // Use VertexMore to retrieve more information : + VertexMore theVertexMore(&theVertexFit, Units_mm); + + for (Int_t i = 0; i < Ntr; i++) { + + TVectorD updated_par = + theVertexFit.GetNewPar(i); // updated track parameters + TVectorD updated_par_edm4hep = + VertexingUtils::Delphes2Edm4hep_TrackParam(updated_par, Units_mm); + updated_track_parameters.push_back(updated_par_edm4hep); + + // Momenta of the tracks at the vertex: + TVector3 ptrack_at_vertex = theVertexMore.GetMomentum(i); + updated_track_momentum_at_vertex.push_back(ptrack_at_vertex); + } + + TheVertex.updated_track_parameters = updated_track_parameters; + TheVertex.updated_track_momentum_at_vertex = updated_track_momentum_at_vertex; + TheVertex.final_track_phases = final_track_phases; + TheVertex.reco_chi2 = reco_chi2; + + // memory cleanup + for (Int_t i = 0; i < Ntr; i++) { + delete trkPar[i]; + delete trkCov[i]; + } + delete[] trkPar; + delete[] trkCov; + + return TheVertex; +} + +// --------------------------------------------------------------------------------------------------------------------------- + +ROOT::VecOps::RVec +get_PrimaryTracks(ROOT::VecOps::RVec tracks, + bool BeamSpotConstraint, double bsc_sigmax, double bsc_sigmay, + double bsc_sigmaz, double bsc_x, double bsc_y, double bsc_z) { + + // iterative procedure to determine the primary vertex - and the primary + // tracks + + // Feb 2023: Avoid the recursive approach used before... 
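edm4hep stores the 3x3 vertex covariance as the six elements of its lower triangle packed row by row, which is the order used when filling covMatrix above. A minimal packing/unpacking sketch (function names are illustrative):

#include <array>
#include "TMatrixDSym.h"

// Pack a symmetric 3x3 covariance in edm4hep lower-triangle order:
// (0,0), (1,0), (1,1), (2,0), (2,1), (2,2).
std::array<float, 6> packLowerTriangle(const TMatrixDSym &c) {
  return {(float)c(0, 0),
          (float)c(1, 0), (float)c(1, 1),
          (float)c(2, 0), (float)c(2, 1), (float)c(2, 2)};
}

TMatrixDSym unpackLowerTriangle(const std::array<float, 6> &v) {
  TMatrixDSym c(3);
  c(0, 0) = v[0];
  c(1, 0) = v[1]; c(0, 1) = v[1];
  c(1, 1) = v[2];
  c(2, 0) = v[3]; c(0, 2) = v[3];
  c(2, 1) = v[4]; c(1, 2) = v[4];
  c(2, 2) = v[5];
  return c;
}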
else very very slow, + // with the new VertexFit objects + + // Units for the beam-spot : mum + // See + // https://github.com/HEP-FCC/FCCeePhysicsPerformance/tree/master/General#generating-events-under-realistic-fcc-ee-environment-conditions + + // bool debug = true ; + bool debug = false; + float CHI2MAX = 25; + // float CHI2MAX = 10; + + if (debug) { + std::cout << " ... enter in VertexFitterSimple::get_PrimaryTracks Ntr = " + << tracks.size() << std::endl; + } + + ROOT::VecOps::RVec seltracks = tracks; + + if (seltracks.size() <= 1) + return seltracks; + + int Ntr = tracks.size(); + + TVectorD **trkPar = new TVectorD *[Ntr]; + TMatrixDSym **trkCov = new TMatrixDSym *[Ntr]; + + for (Int_t i = 0; i < Ntr; i++) { + edm4hep::TrackState t = tracks[i]; + TVectorD par = VertexingUtils::get_trackParam(t); + trkPar[i] = new TVectorD(par); + TMatrixDSym Cov = VertexingUtils::get_trackCov(t); + trkCov[i] = new TMatrixDSym(Cov); + } + + VertexFit theVertexFit(Ntr, trkPar, trkCov); + + if (BeamSpotConstraint) { + TVectorD xv_BS(3); + xv_BS[0] = bsc_x * 1e-6; + xv_BS[1] = bsc_y * 1e-6; + xv_BS[2] = bsc_z * 1e-6; + TMatrixDSym cov_BS(3); + cov_BS[0][0] = pow(bsc_sigmax * 1e-6, 2); + cov_BS[1][1] = pow(bsc_sigmay * 1e-6, 2); + cov_BS[2][2] = pow(bsc_sigmaz * 1e-6, 2); + theVertexFit.AddVtxConstraint(xv_BS, cov_BS); + } + + TVectorD x = theVertexFit.GetVtx(); // this actually runs the fit + + float chi2_max = 1e30; + + while (chi2_max >= CHI2MAX) { + + TVectorD tracks_chi2 = theVertexFit.GetVtxChi2List(); + chi2_max = tracks_chi2.Max(); + + int n_removed = 0; + for (int i = 0; i < theVertexFit.GetNtrk(); i++) { + float track_chi2 = tracks_chi2[i]; + if (track_chi2 >= chi2_max) { + theVertexFit.RemoveTrk(i); + seltracks.erase(seltracks.begin() + i); + n_removed++; + } + } + if (n_removed > 0) { + if (theVertexFit.GetNtrk() > 1) { + // run the fit again: + x = theVertexFit.GetVtx(); + TVectorD new_tracks_chi2 = theVertexFit.GetVtxChi2List(); + chi2_max = new_tracks_chi2.Max(); + } else { + chi2_max = 0; // exit from the loop w/o crashing.. 
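Erasing by index inside a forward loop shifts the indices of every element behind the erased one, so when more than one entry can be dropped in a single pass a common pattern is to walk the indices backwards. A generic sketch of that pattern, independent of the VertexFit class and not taken from this code:

#include <vector>

// Remove every entry whose chi2 is at or above the cut, erasing from the
// back so the indices still to be visited remain valid.
template <typename T>
void removeHighChi2(std::vector<T> &items, std::vector<double> &chi2,
                    double chi2Cut) {
  for (int i = (int)chi2.size() - 1; i >= 0; --i) {
    if (chi2[i] >= chi2Cut) {
      items.erase(items.begin() + i);
      chi2.erase(chi2.begin() + i);
    }
  }
}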
+ } + } + } // end while + + // memory cleanup : + for (Int_t i = 0; i < Ntr; i++) { + delete trkPar[i]; + delete trkCov[i]; + } + delete[] trkPar; + delete[] trkCov; + + return seltracks; +} + +// --------------------------------------------------------------------------------------------------------------------------- + +ROOT::VecOps::RVec +get_NonPrimaryTracks(ROOT::VecOps::RVec allTracks, + ROOT::VecOps::RVec primaryTracks) { + + ROOT::VecOps::RVec result; + for (auto &track : allTracks) { + bool isInPrimary = false; + for (auto &primary : primaryTracks) { + if (VertexingUtils::compare_Tracks(track, primary)) { + isInPrimary = true; + break; + } + } + if (!isInPrimary) + result.push_back(track); + } + + return result; +} + +// --------------------------------------------------------------------------------------------------------------------------- + +ROOT::VecOps::RVec +IsPrimary_forTracks(ROOT::VecOps::RVec allTracks, + ROOT::VecOps::RVec primaryTracks) { + + ROOT::VecOps::RVec result; + for (auto &track : allTracks) { + bool isInPrimary = false; + for (auto &primary : primaryTracks) { + if (VertexingUtils::compare_Tracks(track, primary)) { + isInPrimary = true; + break; + } + } + result.push_back(isInPrimary); + } + return result; +} + +} // namespace VertexFitterSimple + +} // namespace FCCAnalyses diff --git a/analyzers/dataframe/src/VertexingUtils.cc b/analyzers/dataframe/src/VertexingUtils.cc index 87a689b7ac..9af606a7ab 100644 --- a/analyzers/dataframe/src/VertexingUtils.cc +++ b/analyzers/dataframe/src/VertexingUtils.cc @@ -1,244 +1,1476 @@ -#include "FCCAnalyses/VertexingUtils.h" - -namespace FCCAnalyses{ - -namespace VertexingUtils{ - -// -// Selection of particles based on the d0 / z0 significances of the associated track -// -selTracks::selTracks( float arg_d0sig_min, float arg_d0sig_max, float arg_z0sig_min, float arg_z0sig_max) : m_d0sig_min(arg_d0sig_min), - m_d0sig_max( arg_d0sig_max ), - m_z0sig_min( arg_z0sig_min ), - m_z0sig_max (arg_z0sig_max) { }; -ROOT::VecOps::RVec -selTracks::operator() (ROOT::VecOps::RVec recop, - ROOT::VecOps::RVec tracks ) { - - ROOT::VecOps::RVec result; - result.reserve(recop.size()); - - for (size_t i = 0; i < recop.size(); ++i) { - auto & p = recop[i]; - if (p.tracks_begin m_d0sig_max || fabs( d0sig ) < m_d0sig_min ) continue; - //double z0sig = fabs( tr.Z0 / sqrt( tr.covMatrix[12]) ); - double z0sig = fabs( tr.Z0 / sqrt( tr.covMatrix[9]) ); // covMat = lower-triangle - if ( fabs( z0sig ) > m_z0sig_max || fabs( z0sig ) < m_z0sig_min ) continue; - result.emplace_back(p); - } - } - return result; -} - - -// -// Selection of primary particles based on the matching of RecoParticles -// to MC particles -// -ROOT::VecOps::RVec -SelPrimaryTracks (ROOT::VecOps::RVec recind, ROOT::VecOps::RVec mcind, - ROOT::VecOps::RVec reco, - ROOT::VecOps::RVec mc, - TVector3 MC_EventPrimaryVertex) { - - ROOT::VecOps::RVec result; - result.reserve(reco.size()); - - // Event primary vertex: - double xvtx0 = MC_EventPrimaryVertex[0]; - double yvtx0 = MC_EventPrimaryVertex[1]; - double zvtx0 = MC_EventPrimaryVertex[2]; - - for (unsigned int i=0; i tracks) { - int nt = tracks.size(); - return nt; -} - - -TVectorD -get_trackParam( edm4hep::TrackState & atrack) { - double d0 =atrack.D0 ; - double phi0 = atrack.phi ; - double omega = atrack.omega ; - double z0 = atrack.Z0 ; - double tanlambda = atrack.tanLambda ; - TVectorD res(5); - - double scale0 = 1e-3; //convert mm to m - double scale1 = 1; - double scale2 = 0.5*1e3; // C = rho/2, convert from mm-1 to m-1 - 
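The three track-level helpers above are typically chained: select the primary tracks iteratively, then classify every track in the event against that list via compare_Tracks. A usage sketch assuming the FCCAnalyses headers; the beam-spot arguments are placeholders.

#include "FCCAnalyses/VertexFitterSimple.h"

using namespace FCCAnalyses;

void classifyTracks(const ROOT::VecOps::RVec<edm4hep::TrackState> &tracks) {
  // iterative primary-track selection (the chi2 cut of 25 is hard-coded inside)
  auto primaries = VertexFitterSimple::get_PrimaryTracks(
      tracks, true, 10., 0.1, 500., 0., 0., 0.); // beam-spot numbers are illustrative
  // tracks not matched to any primary track
  auto secondaries = VertexFitterSimple::get_NonPrimaryTracks(tracks, primaries);
  // one boolean per input track, aligned with `tracks`
  auto isPrimary = VertexFitterSimple::IsPrimary_forTracks(tracks, primaries);
  (void)secondaries;
  (void)isPrimary;
}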
double scale3 = 1e-3 ; //convert mm to m - double scale4 = 1.; - - scale2 = -scale2 ; // sign of omega - - res[0] = d0 * scale0; - res[1] = phi0 * scale1 ; - res[2] = omega * scale2 ; - res[3] = z0 * scale3 ; - res[4] = tanlambda * scale4 ; - return res; -} - -TMatrixDSym -get_trackCov( edm4hep::TrackState & atrack) { - auto covMatrix = atrack.covMatrix; - TMatrixDSym covM(5); - - double scale0 = 1e-3; - double scale1 = 1.; - double scale2 = 0.5*1e3; - double scale3 = 1e-3 ; - double scale4 = 1.; - - scale2 = -scale2 ; // sign of omega - - // covMatrix = lower-triang;e - - covM[0][0] = covMatrix[0] *scale0 * scale0; - - covM[1][0] = covMatrix[1] *scale1 * scale0; - covM[1][1] = covMatrix[2] *scale1 * scale1; - - covM[0][1] = covM[1][0]; - - covM[2][0] = covMatrix[3] *scale2 * scale0; - covM[2][1] = covMatrix[4] *scale2 * scale1; - covM[2][2] = covMatrix[5] *scale2 * scale2; - - covM[0][2] = covM[2][0]; - covM[1][2] = covM[2][1]; - - covM[3][0] = covMatrix[6] *scale3 * scale0; - covM[3][1] = covMatrix[7] *scale3 * scale1; - covM[3][2] = covMatrix[8] *scale3 * scale2; - covM[3][3] = covMatrix[9] *scale3 * scale3; - - covM[0][3] = covM[3][0]; - covM[1][3] = covM[3][1]; - covM[2][3] = covM[3][2]; - - covM[4][0] = covMatrix[10] *scale4 * scale0; - covM[4][1] = covMatrix[11] *scale4 * scale1; - covM[4][2] = covMatrix[12] *scale4 * scale2; - covM[4][3] = covMatrix[13] *scale4 * scale3; - covM[4][4] = covMatrix[14] *scale4 * scale4; - - covM[0][4] = covM[4][0]; - covM[1][4] = covM[4][1]; - covM[2][4] = covM[4][2]; - covM[3][4] = covM[4][3]; - - return covM; -} - - -FCCAnalysesVertex -get_FCCAnalysesVertex(ROOT::VecOps::RVec TheVertexColl, int index ){ - FCCAnalysesVertex result; - if (index TheVertexColl ){ - return TheVertexColl.size(); -} - - -edm4hep::VertexData get_VertexData( FCCAnalysesVertex TheVertex ) { - return TheVertex.vertex ; -} - -ROOT::VecOps::RVec get_VertexData( ROOT::VecOps::RVec TheVertexColl ) { - ROOT::VecOps::RVec result; - for (unsigned int i=0; i TheVertexColl, int index) { - edm4hep::VertexData result; - if (index get_VertexRecoInd( FCCAnalysesVertex TheVertex ) { - return TheVertex.reco_ind; -} - -TVectorD ParToACTS(TVectorD Par){ - - TVectorD pACTS(6); // Return vector - // - double fB=2.; - Double_t b = -0.29988*fB / 2.; - pACTS(0) = 1000*Par(0); // D from m to mm - pACTS(1) = 1000 * Par(3); // z0 from m to mm - pACTS(2) = Par(1); // Phi0 is unchanged - pACTS(3) = TMath::ATan2(1.0,Par(4)); // Theta in [0, pi] range - pACTS(4) = Par(2) / (b*TMath::Sqrt(1 + Par(4)*Par(4))); // q/p in GeV - pACTS(5) = 0.0; // Time: currently undefined - // - return pACTS; -} - - -// Covariance conversion to ACTS format -TMatrixDSym CovToACTS(TMatrixDSym Cov, TVectorD Par){ - - double fB=2.; - TMatrixDSym cACTS(6); cACTS.Zero(); - Double_t b = -0.29988*fB / 2.; - // - // Fill derivative matrix - TMatrixD A(5, 5); A.Zero(); - Double_t ct = Par(4); // cot(theta) - Double_t C = Par(2); // half curvature - A(0, 0) = 1000.; // D-D conversion to mm - A(1, 2) = 1.0; // phi0-phi0 - A(2, 4) = 1.0/(TMath::Sqrt(1.0 + ct*ct) * b); // q/p-C - A(3, 1) = 1000.; // z0-z0 conversion to mm - A(4, 3) = -1.0 / (1.0 + ct*ct); // theta - cot(theta) - A(4, 4) = -C*ct / (b*pow(1.0 + ct*ct,3.0/2.0)); // q/p-cot(theta) - // - TMatrixDSym Cv = Cov; - TMatrixD At(5, 5); - At.Transpose(A); - Cv.Similarity(At); - TMatrixDSub(cACTS, 0, 4, 0, 4) = Cv; - cACTS(5, 5) = 0.1; // Currently undefined: set to arbitrary value to avoid crashes - // - return cACTS; -} - -}//end NS VertexingUtils - -}//end NS FCCAnalyses +#include 
"FCCAnalyses/VertexingUtils.h" +#include "FCCAnalyses/VertexFitterSimple.h" + +#include "TrkUtil.h" // from delphes + +namespace FCCAnalyses { + +namespace VertexingUtils { + +TVector3 ParToP(TVectorD Par) { + double fB = 2; // 2 Tesla + TrkUtil tu; + return tu.ParToP(Par, fB); +} + +TVectorD XPtoPar(TVector3 x, TVector3 p, Double_t Q) { + double fB = 2; // 2 Tesla + TrkUtil tu; + return tu.XPtoPar(x, p, Q, fB); +} + +// +// Selection of particles based on the d0 / z0 significances of the associated +// track +// +selTracks::selTracks(float arg_d0sig_min, float arg_d0sig_max, + float arg_z0sig_min, float arg_z0sig_max) + : m_d0sig_min(arg_d0sig_min), m_d0sig_max(arg_d0sig_max), + m_z0sig_min(arg_z0sig_min), m_z0sig_max(arg_z0sig_max){}; +ROOT::VecOps::RVec selTracks::operator()( + ROOT::VecOps::RVec recop, + ROOT::VecOps::RVec tracks) { + + ROOT::VecOps::RVec result; + result.reserve(recop.size()); + + for (size_t i = 0; i < recop.size(); ++i) { + auto &p = recop[i]; + if (p.tracks_begin < tracks.size()) { + auto &tr = tracks.at(p.tracks_begin); + double d0sig = fabs(tr.D0 / sqrt(tr.covMatrix[0])); + if (fabs(d0sig) > m_d0sig_max || fabs(d0sig) < m_d0sig_min) + continue; + // double z0sig = fabs( tr.Z0 / sqrt( tr.covMatrix[12]) ); + double z0sig = + fabs(tr.Z0 / sqrt(tr.covMatrix[9])); // covMat = lower-triangle + if (fabs(z0sig) > m_z0sig_max || fabs(z0sig) < m_z0sig_min) + continue; + result.emplace_back(p); + } + } + return result; +} + +// +// Selection of primary particles based on the matching of RecoParticles +// to MC particles +// +ROOT::VecOps::RVec +SelPrimaryTracks(ROOT::VecOps::RVec recind, ROOT::VecOps::RVec mcind, + ROOT::VecOps::RVec reco, + ROOT::VecOps::RVec mc, + TVector3 MC_EventPrimaryVertex) { + + ROOT::VecOps::RVec result; + result.reserve(reco.size()); + + // Event primary vertex: + double xvtx0 = MC_EventPrimaryVertex[0]; + double yvtx0 = MC_EventPrimaryVertex[1]; + double zvtx0 = MC_EventPrimaryVertex[2]; + + for (unsigned int i = 0; i < recind.size(); i++) { + double xvtx = mc.at(mcind.at(i)).vertex.x; + double yvtx = mc.at(mcind.at(i)).vertex.y; + double zvtx = mc.at(mcind.at(i)).vertex.z; + // primary particle ? 
+ double zero = 1e-12; + if (fabs(xvtx - xvtx0) < zero && fabs(yvtx - yvtx0) < zero && + fabs(zvtx - zvtx0) < zero) { + int reco_idx = recind.at(i); + result.push_back(reco.at(reco_idx)); + } + } + return result; +} + +int get_nTracks(ROOT::VecOps::RVec tracks) { + int nt = tracks.size(); + return nt; +} + +bool compare_Tracks(const edm4hep::TrackState &tr1, + const edm4hep::TrackState &tr2) { + if (tr1.D0 == tr2.D0 && tr1.phi == tr2.phi && tr1.omega == tr2.omega && + tr1.Z0 == tr2.Z0 && tr1.tanLambda == tr2.tanLambda && + tr1.time == tr2.time && tr1.referencePoint.x == tr2.referencePoint.x && + tr1.referencePoint.y == tr2.referencePoint.y && + tr1.referencePoint.z == tr2.referencePoint.z) + return true; + return false; +} + +// ---------------------------------------------------------------------------------------- + +// --- Conversion methods between the Delphes and edm4hep conventions + +TVectorD Edm4hep2Delphes_TrackParam(const TVectorD ¶m, bool Units_mm) { + + double conv = 1e-3; // convert mm to m + if (Units_mm) + conv = 1.; + + double scale0 = conv; // convert mm to m if needed + double scale1 = 1; + double scale2 = 0.5 / conv; // C = rho/2, convert from mm-1 to m-1 + double scale3 = conv; // convert mm to m + double scale4 = 1.; + scale2 = -scale2; // sign of omega + + TVectorD result(5); + result[0] = param[0] * scale0; + result[1] = param[1] * scale1; + result[2] = param[2] * scale2; + result[3] = param[3] * scale3; + result[4] = param[4] * scale4; + + return result; +} + +TVectorD Delphes2Edm4hep_TrackParam(const TVectorD ¶m, bool Units_mm) { + + double conv = 1e-3; // convert mm to m + if (Units_mm) + conv = 1.; + + double scale0 = conv; // convert mm to m if needed + double scale1 = 1; + double scale2 = 0.5 / conv; // C = rho/2, convert from mm-1 to m-1 + double scale3 = conv; // convert mm to m + double scale4 = 1.; + scale2 = -scale2; // sign of omega + + TVectorD result(5); + result[0] = param[0] / scale0; + result[1] = param[1] / scale1; + result[2] = param[2] / scale2; + result[3] = param[3] / scale3; + result[4] = param[4] / scale4; + + return result; +} + +TMatrixDSym +Edm4hep2Delphes_TrackCovMatrix(const std::array &covMatrix, + bool Units_mm) { + + // careful: since summer 2022, the covariance matrix in edm4hep is an array of + // 21 floats because the time has been added as a 6th track parameter. But + // Delphes (and k4marlinWrapper) samples still fill it as an array of 15 + // floats. 
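The element-by-element covariance conversion that follows implements a single rule: under a diagonal change of variables a'_i = s_i a_i, the covariance transforms as C'_ij = s_i s_j C_ij. A compact sketch of the same operation as a loop over the lower triangle; it covers the 15 geometric terms only, and the function name and argument types are illustrative.

#include <array>
#include "TMatrixDSym.h"

// Apply a diagonal reparametrisation a'_i = s_i * a_i to a 5x5 track
// covariance stored as a row-by-row lower triangle (15 floats).
TMatrixDSym scaleLowerTriangleCov(const std::array<float, 15> &covLT,
                                  const std::array<double, 5> &s) {
  TMatrixDSym covM(5);
  int k = 0;
  for (int i = 0; i < 5; ++i) {
    for (int j = 0; j <= i; ++j) {
      covM(i, j) = covLT[k] * s[i] * s[j];
      covM(j, i) = covM(i, j);
      ++k;
    }
  }
  return covM;
}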
+ + double conv = 1e-3; // convert mm to m + if (Units_mm) + conv = 1.; + + double scale0 = conv; // convert mm to m if needed + double scale1 = 1; + double scale2 = 0.5 / conv; // C = rho/2, convert from mm-1 to m-1 + double scale3 = conv; // convert mm to m + double scale4 = 1.; + scale2 = -scale2; // sign of omega + + TMatrixDSym covM(5); + + covM[0][0] = covMatrix[0] * scale0 * scale0; + + covM[1][0] = covMatrix[1] * scale1 * scale0; + covM[1][1] = covMatrix[2] * scale1 * scale1; + + covM[0][1] = covM[1][0]; + + covM[2][0] = covMatrix[3] * scale2 * scale0; + covM[2][1] = covMatrix[4] * scale2 * scale1; + covM[2][2] = covMatrix[5] * scale2 * scale2; + + covM[0][2] = covM[2][0]; + covM[1][2] = covM[2][1]; + + covM[3][0] = covMatrix[6] * scale3 * scale0; + covM[3][1] = covMatrix[7] * scale3 * scale1; + covM[3][2] = covMatrix[8] * scale3 * scale2; + covM[3][3] = covMatrix[9] * scale3 * scale3; + + covM[0][3] = covM[3][0]; + covM[1][3] = covM[3][1]; + covM[2][3] = covM[3][2]; + + covM[4][0] = covMatrix[10] * scale4 * scale0; + covM[4][1] = covMatrix[11] * scale4 * scale1; + covM[4][2] = covMatrix[12] * scale4 * scale2; + covM[4][3] = covMatrix[13] * scale4 * scale3; + covM[4][4] = covMatrix[14] * scale4 * scale4; + + covM[0][4] = covM[4][0]; + covM[1][4] = covM[4][1]; + covM[2][4] = covM[4][2]; + covM[3][4] = covM[4][3]; + + return covM; +} + +std::array Delphes2Edm4hep_TrackCovMatrix(const TMatrixDSym &cov, + bool Units_mm) { + + // careful: since summer 2022, the covariance matrix in edm4hep is an array of + // 21 floats because the time has been added as a 6th track parameter. But + // Delphes (and k4marlinWrapper) samples still fill it as an array of 15 + // floats. + + double conv = 1e-3; // convert mm to m + if (Units_mm) + conv = 1.; + + double scale0 = conv; // convert mm to m if needed + double scale1 = 1; + double scale2 = 0.5 / conv; // C = rho/2, convert from mm-1 to m-1 + double scale3 = conv; // convert mm to m + double scale4 = 1.; + scale2 = -scale2; // sign of omega + + std::array covMatrix; + covMatrix[0] = cov[0][0] / (scale0 * scale0); + covMatrix[1] = cov[1][0] / (scale1 * scale0); + covMatrix[2] = cov[1][1] / (scale1 * scale1); + covMatrix[3] = cov[2][0] / (scale0 * scale2); + covMatrix[4] = cov[2][1] / (scale1 * scale2); + covMatrix[5] = cov[2][2] / (scale2 * scale2); + covMatrix[6] = cov[3][0] / (scale3 * scale0); + covMatrix[7] = cov[3][1] / (scale3 * scale1); + covMatrix[8] = cov[3][2] / (scale3 * scale2); + covMatrix[9] = cov[3][3] / (scale3 * scale3); + covMatrix[10] = cov[4][0] / (scale4 * scale0); + covMatrix[11] = cov[4][1] / (scale4 * scale1); + covMatrix[12] = cov[4][2] / (scale4 * scale2); + covMatrix[13] = cov[4][3] / (scale4 * scale3); + covMatrix[14] = cov[4][4] / (scale4 * scale4); + + for (int i = 15; i < 21; i++) { + covMatrix[i] = 0.; + } + + return covMatrix; +} + +TVectorD get_trackParam(edm4hep::TrackState &atrack, bool Units_mm) { + double d0 = atrack.D0; + double phi0 = atrack.phi; + double omega = atrack.omega; + double z0 = atrack.Z0; + double tanlambda = atrack.tanLambda; + TVectorD param(5); + param[0] = d0; + param[1] = phi0; + param[2] = omega; + param[3] = z0; + param[4] = tanlambda; + TVectorD res = Edm4hep2Delphes_TrackParam(param, Units_mm); + + return res; +} + +TMatrixDSym get_trackCov(edm4hep::TrackState &atrack, bool Units_mm) { + auto covMatrix = atrack.covMatrix; + + TMatrixDSym covM = Edm4hep2Delphes_TrackCovMatrix(covMatrix, Units_mm); + + return covM; +} + +// 
---------------------------------------------------------------------------------------- + +float get_trackMom(edm4hep::TrackState &atrack) { + double fB = 2; // 2 Tesla + + float C = -0.5 * 1e3 * atrack.omega; + float phi0 = atrack.phi; + float ct = atrack.tanLambda; + // + float pt = fB * 0.2998 / TMath::Abs(2 * C); + TVector3 p(pt * TMath::Cos(phi0), pt * TMath::Sin(phi0), pt * ct); + float result = p.Mag(); + return result; +} + +FCCAnalysesVertex +get_FCCAnalysesVertex(ROOT::VecOps::RVec TheVertexColl, + int index) { + FCCAnalysesVertex result; + if (index < TheVertexColl.size()) + result = TheVertexColl.at(index); + return result; +} + +int get_Nvertex(ROOT::VecOps::RVec TheVertexColl) { + return TheVertexColl.size(); +} + +edm4hep::VertexData get_VertexData(FCCAnalysesVertex TheVertex) { + return TheVertex.vertex; +} + +ROOT::VecOps::RVec +get_VertexData(ROOT::VecOps::RVec TheVertexColl) { + ROOT::VecOps::RVec result; + for (unsigned int i = 0; i < TheVertexColl.size(); i++) { + result.push_back(TheVertexColl.at(i).vertex); + } + return result; +} + +edm4hep::VertexData +get_VertexData(ROOT::VecOps::RVec TheVertexColl, int index) { + edm4hep::VertexData result; + if (index < TheVertexColl.size()) + result = TheVertexColl.at(index).vertex; + return result; +} + +int get_VertexNtrk(FCCAnalysesVertex TheVertex) { return TheVertex.ntracks; } + +ROOT::VecOps::RVec +get_VertexNtrk(ROOT::VecOps::RVec vertices) { + ROOT::VecOps::RVec result; + for (auto &TheVertex : vertices) { + result.push_back(TheVertex.ntracks); + } + return result; +} + +ROOT::VecOps::RVec get_VertexRecoInd(FCCAnalysesVertex TheVertex) { + return TheVertex.reco_ind; +} + +ROOT::VecOps::RVec get_VertexRecoParticlesInd( + FCCAnalysesVertex TheVertex, + const ROOT::VecOps::RVec &reco) { + + ROOT::VecOps::RVec result; + ROOT::VecOps::RVec indices_tracks = TheVertex.reco_ind; + for (int i = 0; i < indices_tracks.size(); i++) { + int tk_index = indices_tracks[i]; + for (int j = 0; j < reco.size(); j++) { + auto &p = reco[j]; + if (p.tracks_begin == p.tracks_end) + continue; + if (p.tracks_begin == tk_index) { + result.push_back(j); + break; + } + } + } + return result; +} + +TVectorD ParToACTS(TVectorD Par) { + + TVectorD pACTS(6); // Return vector + // + double fB = 2.; + Double_t b = -0.29988 * fB / 2.; + pACTS(0) = 1000 * Par(0); // D from m to mm + pACTS(1) = 1000 * Par(3); // z0 from m to mm + pACTS(2) = Par(1); // Phi0 is unchanged + pACTS(3) = TMath::ATan2(1.0, Par(4)); // Theta in [0, pi] range + pACTS(4) = Par(2) / (b * TMath::Sqrt(1 + Par(4) * Par(4))); // q/p in GeV + pACTS(5) = 0.0; // Time: currently undefined + // + return pACTS; +} + +// Covariance conversion to ACTS format +TMatrixDSym CovToACTS(TMatrixDSym Cov, TVectorD Par) { + + double fB = 2.; + TMatrixDSym cACTS(6); + cACTS.Zero(); + Double_t b = -0.29988 * fB / 2.; + // + // Fill derivative matrix + TMatrixD A(5, 5); + A.Zero(); + Double_t ct = Par(4); // cot(theta) + Double_t C = Par(2); // half curvature + A(0, 0) = 1000.; // D-D conversion to mm + A(1, 2) = 1.0; // phi0-phi0 + A(2, 4) = 1.0 / (TMath::Sqrt(1.0 + ct * ct) * b); // q/p-C + A(3, 1) = 1000.; // z0-z0 conversion to mm + A(4, 3) = -1.0 / (1.0 + ct * ct); // theta - cot(theta) + A(4, 4) = -C * ct / (b * pow(1.0 + ct * ct, 3.0 / 2.0)); // q/p-cot(theta) + // + TMatrixDSym Cv = Cov; + TMatrixD At(5, 5); + At.Transpose(A); + Cv.Similarity(At); + TMatrixDSub(cACTS, 0, 4, 0, 4) = Cv; + cACTS(5, 5) = + 0.1; // Currently undefined: set to arbitrary value to avoid crashes + // + return cACTS; +} + 
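get_trackMom and the q/p term of ParToACTS rely on the same helix relation: for a solenoid field B in tesla and half-curvature C in m^-1, the transverse momentum is pt [GeV] = 0.2998 * B / |2C|, and the longitudinal component follows from tan(lambda). A minimal sketch of that relation (function name illustrative, B = 2 T as in this file):

#include <cmath>
#include "TVector3.h"

// Helix momentum from half-curvature C [1/m], phi0 and tan(lambda):
// pt [GeV] = 0.2998 * B[T] / |2C|.
TVector3 helixMomentum(double C, double phi0, double tanLambda, double B = 2.) {
  double pt = 0.2998 * B / std::fabs(2. * C);
  return TVector3(pt * std::cos(phi0), pt * std::sin(phi0), pt * tanLambda);
}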
+//////////////////////////////////////////////////// + +// get all reconstructed vertices in a single vector +ROOT::VecOps::RVec +get_all_vertices(FCCAnalysesVertex PV, + ROOT::VecOps::RVec SV) { + // Returns a vector of all vertices (PV and SVs) + ROOT::VecOps::RVec result; + result.push_back(PV); + for (auto &p : SV) { + result.push_back(p); + } + return result; +} +// +ROOT::VecOps::RVec +get_all_vertices(FCCAnalysesVertex PV, + ROOT::VecOps::RVec> SV) { + // Returns a vector of all vertices (PV and SVs) + ROOT::VecOps::RVec result; + result.push_back(PV); + for (auto i_SV : SV) { + for (auto &p : i_SV) { + result.push_back(p); + } + } + return result; +} +// +// get all SVs in a single vector +ROOT::VecOps::RVec get_all_SVs( + ROOT::VecOps::RVec> vertices) { + // Returns a vector of all SVs + ROOT::VecOps::RVec result; + for (auto i_SV : vertices) { + for (auto &p : i_SV) { + result.push_back(p); + } + } + return result; +} + +// internal fns for SV finder + +// invariant mass of a two track vertex +double get_invM_pairs(FCCAnalysesVertex vertex, double m1, double m2) { + // CAUTION: m1 -> first track; m2 -> second track + + double result; + + ROOT::VecOps::RVec p_tracks = + vertex.updated_track_momentum_at_vertex; + + TLorentzVector p4_vtx; + double m[2] = {m1, m2}; + int nTr = p_tracks.size(); + + for (unsigned int i = 0; i < nTr; i++) { + TLorentzVector p4_tr; + p4_tr.SetXYZM(p_tracks[i].X(), p_tracks[i].Y(), p_tracks[i].Z(), m[i]); + p4_vtx += p4_tr; + } + + result = p4_vtx.M(); + return result; +} + +ROOT::VecOps::RVec +get_invM_pairs(ROOT::VecOps::RVec vertices, double m1, + double m2) { + // CAUTION: m1 -> first track; m2 -> second track + + ROOT::VecOps::RVec result; + for (auto &vertex : vertices) { + + double result_i; + ROOT::VecOps::RVec p_tracks = + vertex.updated_track_momentum_at_vertex; + + TLorentzVector p4_vtx; + double m[2] = {m1, m2}; + int nTr = p_tracks.size(); + + for (unsigned int i = 0; i < nTr; i++) { + TLorentzVector p4_tr; + p4_tr.SetXYZM(p_tracks[i].X(), p_tracks[i].Y(), p_tracks[i].Z(), m[i]); + p4_vtx += p4_tr; + } + + result_i = p4_vtx.M(); + result.push_back(result_i); + } + return result; +} + +// invariant mass of a vertex (assuming all tracks to be pions) +double get_invM(FCCAnalysesVertex vertex) { + + double result; + + ROOT::VecOps::RVec p_tracks = + vertex.updated_track_momentum_at_vertex; + + TLorentzVector p4_vtx; + const double m = 0.13957039; // pion mass + + for (TVector3 p_tr : p_tracks) { + TLorentzVector p4_tr; + p4_tr.SetXYZM(p_tr.X(), p_tr.Y(), p_tr.Z(), m); + p4_vtx += p4_tr; + } + + result = p4_vtx.M(); + return result; +} + +ROOT::VecOps::RVec +get_invM(ROOT::VecOps::RVec vertices) { + + ROOT::VecOps::RVec result; + for (auto &vertex : vertices) { + + double result_i; + ROOT::VecOps::RVec p_tracks = + vertex.updated_track_momentum_at_vertex; + + TLorentzVector p4_vtx; + const double m = 0.13957039; // pion mass + + for (TVector3 p_tr : p_tracks) { + TLorentzVector p4_tr; + p4_tr.SetXYZM(p_tr.X(), p_tr.Y(), p_tr.Z(), m); + p4_vtx += p4_tr; + } + + result_i = p4_vtx.M(); + result.push_back(result_i); + } + return result; +} + +// cos(angle) b/n V0 candidate's (or any vtx) momentum & PV to V0 displacement +// vector +double get_PV2V0angle(FCCAnalysesVertex V0, FCCAnalysesVertex PV) { + double result; + + ROOT::VecOps::RVec p_tracks = V0.updated_track_momentum_at_vertex; + + TVector3 p_sum; + for (TVector3 p_tr : p_tracks) + p_sum += p_tr; + + edm4hep::Vector3f r_V0 = V0.vertex.position; // in mm + edm4hep::Vector3f r_PV = 
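The vertex invariant mass is obtained by giving each fitted track momentum a mass hypothesis (the charged-pion mass by default), building four-vectors and taking the mass of their sum. A self-contained sketch of that computation (function name illustrative):

#include <vector>
#include "TLorentzVector.h"
#include "TVector3.h"

// Invariant mass of a set of track momenta [GeV] under a common mass
// hypothesis; 0.13957039 GeV (charged pion) is the default used in this file.
double vertexInvariantMass(const std::vector<TVector3> &momenta,
                           double massHypothesis = 0.13957039) {
  TLorentzVector p4sum;
  for (const TVector3 &p : momenta) {
    TLorentzVector p4;
    p4.SetXYZM(p.X(), p.Y(), p.Z(), massHypothesis);
    p4sum += p4;
  }
  return p4sum.M();
}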
PV.vertex.position; // in mm + + TVector3 r_V0_PV(r_V0[0] - r_PV[0], r_V0[1] - r_PV[1], r_V0[2] - r_PV[2]); + + double pDOTr = p_sum.Dot(r_V0_PV); + double p_mag = p_sum.Mag(); + double r_mag = r_V0_PV.Mag(); + + result = pDOTr / (p_mag * r_mag); + return result; +} + +// cos(angle) b/n track momentum sum & PV to vtx displacement vector +double get_PV2vtx_angle(ROOT::VecOps::RVec tracks, + FCCAnalysesVertex vtx, FCCAnalysesVertex PV) { + double result; + + TVector3 p_sum; + for (edm4hep::TrackState tr : tracks) { + TVectorD ipar = get_trackParam(tr); + TVector3 ip = ParToP(ipar); + p_sum += ip; + } + + edm4hep::Vector3f r_vtx = vtx.vertex.position; // in mm + edm4hep::Vector3f r_PV = PV.vertex.position; // in mm + + TVector3 r_vtx_PV(r_vtx[0] - r_PV[0], r_vtx[1] - r_PV[1], r_vtx[2] - r_PV[2]); + + double pDOTr = p_sum.Dot(r_vtx_PV); + double p_mag = p_sum.Mag(); + double r_mag = r_vtx_PV.Mag(); + + result = pDOTr / (p_mag * r_mag); + return result; +} + +// get track's energy assuming it to be a pion +double get_trackE(edm4hep::TrackState track) { + + double result; + + const double m_pi = 0.13957039; + + TVectorD par = get_trackParam(track); + TVector3 p = ParToP(par); + TLorentzVector p4; + p4.SetXYZM(p[0], p[1], p[2], m_pi); + + result = p4.E(); + return result; +} + +//////////////////////////////////////////////// + +// no of reconstructed V0s +int get_n_SV(FCCAnalysesV0 SV) { + int result = SV.vtx.size(); + return result; +} + +// vector of position of all reconstructed V0 (in mm) +ROOT::VecOps::RVec get_position_SV(FCCAnalysesV0 SV) { + ROOT::VecOps::RVec result; + for (FCCAnalysesVertex ivtx : SV.vtx) { + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + result.push_back(xyz); + } + return result; +} + +// vector of PDG IDs of all reconstructed V0 +ROOT::VecOps::RVec get_pdg_V0(FCCAnalysesV0 V0) { + ROOT::VecOps::RVec result = V0.pdgAbs; + return result; +} + +// vector of invariant masses of all reconstructed V0 +ROOT::VecOps::RVec get_invM_V0(FCCAnalysesV0 V0) { + ROOT::VecOps::RVec result = V0.invM; + return result; +} + +// +// vector of momenta of all reconstructed V0 +ROOT::VecOps::RVec get_p_SV(FCCAnalysesV0 SV) { + ROOT::VecOps::RVec result; + + for (FCCAnalysesVertex ivtx : SV.vtx) { + ROOT::VecOps::RVec p_tracks = + ivtx.updated_track_momentum_at_vertex; + + TVector3 p_sum; + for (TVector3 p_tr : p_tracks) + p_sum += p_tr; + + result.push_back(p_sum); + } + return result; +} + +// vector of chi2 of all reconstructed V0s +ROOT::VecOps::RVec get_chi2_SV(FCCAnalysesV0 SV) { + ROOT::VecOps::RVec result; + + for (FCCAnalysesVertex ivtx : SV.vtx) { + int nDOF = 2 * ivtx.ntracks - 3; + result.push_back(nDOF * ivtx.vertex.chi2); + } + return result; +} + +// passing a vector of FCCAnalysesVertex instead of new structs + +// no of reconstructed SVs +int get_n_SV(ROOT::VecOps::RVec vertices) { + int result = vertices.size(); + return result; +} + +// vector of momenta of all reconstructed vertices (SV.vtx or V0.vtx) +ROOT::VecOps::RVec +get_p_SV(ROOT::VecOps::RVec vertices) { + ROOT::VecOps::RVec result; + + for (auto &ivtx : vertices) { + ROOT::VecOps::RVec p_tracks = + ivtx.updated_track_momentum_at_vertex; + + TVector3 p_sum; + for (TVector3 p_tr : p_tracks) + p_sum += p_tr; + + result.push_back(p_sum); + } + return result; +} + +// vector of position of all reconstructed SV (in mm) +ROOT::VecOps::RVec +get_position_SV(ROOT::VecOps::RVec vertices) { + ROOT::VecOps::RVec result; + for (FCCAnalysesVertex ivtx : vertices) { + TVector3 
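The pointing angle compares the summed track momentum at a vertex with the displacement from the primary vertex to that vertex: cos(alpha) = p.r / (|p| |r|), with r = x_SV - x_PV taken component by component. A minimal sketch (function name illustrative):

#include "TVector3.h"

// Cosine of the pointing angle between the vertex momentum and the
// PV -> SV displacement; values close to +1 mean the momentum points
// along the line of flight.
double cosPointingAngle(const TVector3 &pSum, const TVector3 &xSV,
                        const TVector3 &xPV) {
  TVector3 r = xSV - xPV; // displacement, component by component
  return pSum.Dot(r) / (pSum.Mag() * r.Mag());
}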
xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + result.push_back(xyz); + } + return result; +} + +// vector of momentum magnitude of all reconstructed vertices (SV.vtx or V0.vtx) +ROOT::VecOps::RVec +get_pMag_SV(ROOT::VecOps::RVec vertices) { + ROOT::VecOps::RVec result; + + for (auto &ivtx : vertices) { + ROOT::VecOps::RVec p_tracks = + ivtx.updated_track_momentum_at_vertex; + + TVector3 p_sum; + for (TVector3 p_tr : p_tracks) + p_sum += p_tr; + + result.push_back(p_sum.Mag()); + } + return result; +} + +// vector of chi2 of all reconstructed vertices (SV.vtx or V0.vtx) +ROOT::VecOps::RVec +get_chi2_SV(ROOT::VecOps::RVec vertices) { + ROOT::VecOps::RVec result; + + for (auto &ivtx : vertices) { + int nDOF = 2 * ivtx.ntracks - 3; + result.push_back(nDOF * ivtx.vertex.chi2); + } + return result; +} + +// vector of chi2 (normalised) of all reconstructed vertices (SV.vtx or V0.vtx) +ROOT::VecOps::RVec +get_norm_chi2_SV(ROOT::VecOps::RVec vertices) { + ROOT::VecOps::RVec result; + + for (auto &ivtx : vertices) + result.push_back(ivtx.vertex.chi2); + return result; +} + +// vector of nDOF of all reconstructed vertices (SV.vtx or V0.vtx) +ROOT::VecOps::RVec +get_nDOF_SV(ROOT::VecOps::RVec vertices) { + ROOT::VecOps::RVec result; + + for (auto &ivtx : vertices) + result.push_back(2 * ivtx.ntracks - 3); + return result; +} + +// vector of polar angle (theta) of all reconstructed vertices (SV.vtx or +// V0.vtx) +ROOT::VecOps::RVec +get_theta_SV(ROOT::VecOps::RVec vertices) { + ROOT::VecOps::RVec result; + + for (auto &ivtx : vertices) { + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + result.push_back(xyz.Theta()); + } + return result; +} + +// vector of azimuth angle (phi) of all reconstructed vertices (SV.vtx or +// V0.vtx) +ROOT::VecOps::RVec +get_phi_SV(ROOT::VecOps::RVec vertices) { + ROOT::VecOps::RVec result; + + for (auto &ivtx : vertices) { + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + result.push_back(xyz.Phi()); + } + return result; +} + +// vector of (cos of) angles b/n vtx momenta & PV to vtx displacement vectors +ROOT::VecOps::RVec +get_pointingangle_SV(ROOT::VecOps::RVec vertices, + FCCAnalysesVertex PV) { + ROOT::VecOps::RVec result; + + for (auto &ivtx : vertices) { + double iresult = 0.; + + ROOT::VecOps::RVec p_tracks = + ivtx.updated_track_momentum_at_vertex; + TVector3 p_sum; + for (TVector3 p_tr : p_tracks) + p_sum += p_tr; + + edm4hep::Vector3f r_vtx = ivtx.vertex.position; // in mm + edm4hep::Vector3f r_PV = PV.vertex.position; // in mm + + TVector3 r_vtx_PV(r_vtx[0] - r_vtx[0], r_vtx[1] - r_PV[1], + r_vtx[2] - r_PV[2]); + + double pDOTr = p_sum.Dot(r_vtx_PV); + double p_mag = p_sum.Mag(); + double r_mag = r_vtx_PV.Mag(); + + iresult = pDOTr / (p_mag * r_mag); + result.push_back(iresult); + } + return result; +} + +// vector of distances of all reconstructed SV from PV (in mm in xy plane) +ROOT::VecOps::RVec +get_dxy_SV(ROOT::VecOps::RVec vertices, + FCCAnalysesVertex PV) { + ROOT::VecOps::RVec result; + TVector3 x_PV(PV.vertex.position[0], PV.vertex.position[1], + PV.vertex.position[2]); + for (auto &ivtx : vertices) { + TVector3 x_vtx(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + TVector3 x_vtx_PV = x_vtx - x_PV; + + result.push_back(x_vtx_PV.Perp()); + } + return result; +} + +// vector of distances of all reconstructed SV from PV (in mm in 3D) +ROOT::VecOps::RVec +get_d3d_SV(ROOT::VecOps::RVec vertices, + 
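A convention worth keeping in mind when reading these helpers: edm4hep::VertexData::chi2 holds the normalised chi2 (chi2 / ndf, with ndf = 2*Ntracks - 3), so get_chi2_SV multiplies by ndf to recover the raw value while get_norm_chi2_SV returns it as stored. A one-line sketch of that convention (helper names illustrative):

// ndf of an n-track vertex fit: two constraints per track minus the
// three fitted vertex coordinates.
inline int vertexNdf(int nTracks) { return 2 * nTracks - 3; }

// Recover the raw chi2 from the normalised value stored in VertexData::chi2.
inline double rawChi2(double normalisedChi2, int nTracks) {
  return normalisedChi2 * vertexNdf(nTracks);
}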
FCCAnalysesVertex PV) { + ROOT::VecOps::RVec result; + TVector3 x_PV(PV.vertex.position[0], PV.vertex.position[1], + PV.vertex.position[2]); + for (auto &ivtx : vertices) { + TVector3 x_vtx(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + TVector3 x_vtx_PV = x_vtx - x_PV; + + result.push_back(x_vtx_PV.Mag()); + } + return result; +} + +// vector of distances of all reconstructed SV from given TVector3d (in mm in +// 3D) +ROOT::VecOps::RVec +get_d3d_SV_obj(ROOT::VecOps::RVec vertices, + TVector3 object) { + ROOT::VecOps::RVec result; + for (auto &ivtx : vertices) { + TVector3 x_vtx(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + x_vtx = x_vtx - object; + + result.push_back(x_vtx.Mag()); + } + return result; +} + +// vector of distances of all reconstructed SV from given edm4hep::Vector3d (in +// mm in 3D) +ROOT::VecOps::RVec +get_d3d_SV_obj(ROOT::VecOps::RVec vertices, + edm4hep::Vector3d object) { + ROOT::VecOps::RVec result; + for (auto &ivtx : vertices) { + double dx = ivtx.vertex.position[0] - object.x; + double dy = ivtx.vertex.position[1] - object.y; + double dz = ivtx.vertex.position[2] - object.z; + + TVector3 d3d(dx, dy, dz); + + result.push_back(d3d.Mag()); + } + return result; +} + +// vector of decay position distances of all reconstructed SV from given +// TVector3d (in mm in 3D) +ROOT::VecOps::RVec +get_dR_SV_obj(ROOT::VecOps::RVec vertices, TVector3 object) { + ROOT::VecOps::RVec result; + for (auto &ivtx : vertices) { + TVector3 x_vtx(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + result.push_back(1e3 * TMath::Sqrt(pow(ivtx.vertex.position[0], 2) + + pow(ivtx.vertex.position[1], 2) + + pow(ivtx.vertex.position[2], 2)) - + 1e3 * TMath::Sqrt(pow(object.x(), 2) + pow(object.y(), 2) + + pow(object.z(), 2))); + } + return result; +} + +// vector of decay position distances of all reconstructed SV from given +// edm4hep::Vector3d (in mm in 3D) +ROOT::VecOps::RVec +get_dR_SV_obj(ROOT::VecOps::RVec vertices, + edm4hep::Vector3d object) { + ROOT::VecOps::RVec result; + for (auto &ivtx : vertices) { + result.push_back(1e3 * TMath::Sqrt(pow(ivtx.vertex.position[0], 2) + + pow(ivtx.vertex.position[1], 2) + + pow(ivtx.vertex.position[2], 2)) - + 1e3 * TMath::Sqrt(pow(object.x, 2) + pow(object.y, 2) + + pow(object.z, 2))); + } + return result; +} + +// vector of polar angle (theta) of all reconstructed vertices wrt jet axis +// (SV.vtx or V0.vtx) +ROOT::VecOps::RVec +get_relTheta_SV(ROOT::VecOps::RVec vertices, + ROOT::VecOps::RVec nSV_jet, + ROOT::VecOps::RVec jets) { + ROOT::VecOps::RVec result; + + unsigned int j = 0; + int nSV = nSV_jet[0]; + for (unsigned int i = 0; i < vertices.size(); i++) { + auto &ivtx = vertices[i]; + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + + if (i >= nSV) { + j++; + nSV += nSV_jet[j]; + } + auto &ijet = jets[j]; + double jetTheta = ijet.theta(); + + result.push_back(xyz.Theta() - jetTheta); + } + return result; +} + +// vector of azimuthal angle (phi) of all reconstructed vertices wrt jet axis +// (SV.vtx or V0.vtx) +ROOT::VecOps::RVec +get_relPhi_SV(ROOT::VecOps::RVec vertices, + ROOT::VecOps::RVec nSV_jet, + ROOT::VecOps::RVec jets) { + ROOT::VecOps::RVec result; + + unsigned int j = 0; + int nSV = nSV_jet[0]; + for (unsigned int i = 0; i < vertices.size(); i++) { + auto &ivtx = vertices[i]; + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + + if (i >= nSV) { + j++; + 
nSV += nSV_jet[j]; + } + auto &ijet = jets[j]; + TVector3 jetP(ijet.px(), ijet.py(), ijet.pz()); + + result.push_back(xyz.DeltaPhi(jetP)); + } + return result; +} + +// For get_SV_jets outputs + +// no of reconstructed SVs +int get_n_SV( + ROOT::VecOps::RVec> vertices) { + int result = 0; + if (vertices.size() != 0) { + for (auto SV_jets : vertices) + result += SV_jets.size(); + } + return result; +} + +// Return the number of reconstructed SVs +ROOT::VecOps::RVec get_n_SV_jets( + ROOT::VecOps::RVec> vertices) { + ROOT::VecOps::RVec result; + if (vertices.size() != 0) { + for (auto SV_jets : vertices) + result.push_back(SV_jets.size()); + } + return result; +} + +// +// separate V0s by jets +ROOT::VecOps::RVec> +get_svInJets(ROOT::VecOps::RVec vertices, + ROOT::VecOps::RVec nSV_jet) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + int index = 0; + for (unsigned int i : nSV_jet) { + for (unsigned int j = 0; j < i; j++) { + i_result.push_back(vertices[j + index]); + } + + result.push_back(i_result); + i_result.clear(); + index += i; + } + return result; +} + +// separate tracks by jet +std::vector> get_tracksInJets( + ROOT::VecOps::RVec recoparticles, + ROOT::VecOps::RVec thetracks, + ROOT::VecOps::RVec jets, + std::vector> jet_consti) { + std::vector> result; + std::vector iJet_tracks; + + int nJet = jets.size(); + // + for (unsigned int j = 0; j < nJet; j++) { + + std::vector i_jetconsti = jet_consti[j]; + + for (unsigned int ip : i_jetconsti) { + auto &p = recoparticles[ip]; + if (p.tracks_begin >= 0 && p.tracks_begin < thetracks.size()) + iJet_tracks.push_back(thetracks.at(p.tracks_begin)); + } + + result.push_back(iJet_tracks); + iJet_tracks.clear(); + } + return result; +} + +// vector of polar angle (theta) of reconstructed vertices of a jet wrt that jet +// axis [only for vertices from 1 jet] +ROOT::VecOps::RVec +get_relTheta_SV(ROOT::VecOps::RVec vertices, + fastjet::PseudoJet jet) { + ROOT::VecOps::RVec result; + + for (auto &ivtx : vertices) { + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + // + result.push_back(xyz.Theta() - jet.theta()); + } + // + return result; +} + +// vector of azimuthal angle (phi) of all reconstructed vertices wrt jet axis +// [only for vertices from 1 jet] +ROOT::VecOps::RVec +get_relPhi_SV(ROOT::VecOps::RVec vertices, + fastjet::PseudoJet jet) { + ROOT::VecOps::RVec result; + + for (auto &ivtx : vertices) { + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + TVector3 jetP(jet.px(), jet.py(), jet.pz()); + // + result.push_back(xyz.DeltaPhi(jetP)); + } + // + return result; +} + +/////// vec of vec functions (for get_SV_jets) ///////// + +// SV invariant mass +ROOT::VecOps::RVec> +get_invM(ROOT::VecOps::RVec> vertices) { + + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + + for (auto &vertex : i_vertices) { + ROOT::VecOps::RVec p_tracks = + vertex.updated_track_momentum_at_vertex; + // + TLorentzVector p4_vtx; + const double m = 0.13957039; // pion mass + // + for (TVector3 p_tr : p_tracks) { + TLorentzVector p4_tr; + p4_tr.SetXYZM(p_tr.X(), p_tr.Y(), p_tr.Z(), m); + p4_vtx += p4_tr; + } + i_result.push_back(p4_vtx.M()); + } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV momentum +ROOT::VecOps::RVec> +get_p_SV(ROOT::VecOps::RVec> vertices) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec 
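get_svInJets and the per-jet angle helpers all rely on the same flattened bookkeeping: the vertices of all jets are stored in one flat vector and nSV_jet holds how many belong to each jet, so a running offset splits the flat vector back into per-jet groups. A generic sketch of that splitting (names and the std::vector interface are illustrative):

#include <cstddef>
#include <vector>

// Split a flat vector into per-jet groups according to per-jet counts,
// e.g. counts = {2, 0, 1} turns 3 elements into {{e0, e1}, {}, {e2}}.
template <typename T>
std::vector<std::vector<T>> splitByCounts(const std::vector<T> &flat,
                                          const std::vector<int> &counts) {
  std::vector<std::vector<T>> out;
  std::size_t offset = 0;
  for (int n : counts) {
    out.emplace_back(flat.begin() + offset, flat.begin() + offset + n);
    offset += n;
  }
  return out;
}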
i_result; + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + // + for (auto &ivtx : i_vertices) { + ROOT::VecOps::RVec p_tracks = + ivtx.updated_track_momentum_at_vertex; + + TVector3 p_sum; + for (TVector3 p_tr : p_tracks) + p_sum += p_tr; + + i_result.push_back(p_sum); + } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV momentum magnitude +ROOT::VecOps::RVec> get_pMag_SV( + ROOT::VecOps::RVec> vertices) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + // + for (auto &ivtx : i_vertices) { + ROOT::VecOps::RVec p_tracks = + ivtx.updated_track_momentum_at_vertex; + + TVector3 p_sum; + for (TVector3 p_tr : p_tracks) + p_sum += p_tr; + + i_result.push_back(p_sum.Mag()); + } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV daughters multiplicity +ROOT::VecOps::RVec> get_VertexNtrk( + ROOT::VecOps::RVec> vertices) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + // + for (auto &TheVertex : i_vertices) { + i_result.push_back(TheVertex.ntracks); + } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV chi2 +ROOT::VecOps::RVec> get_chi2_SV( + ROOT::VecOps::RVec> vertices) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + // + for (auto &ivtx : i_vertices) { + int nDOF = 2 * ivtx.ntracks - 3; + i_result.push_back(nDOF * ivtx.vertex.chi2); + } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV normalised chi2 +ROOT::VecOps::RVec> get_norm_chi2_SV( + ROOT::VecOps::RVec> vertices) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + // + for (auto &ivtx : i_vertices) + i_result.push_back(ivtx.vertex.chi2); + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV no of DOF +ROOT::VecOps::RVec> get_nDOF_SV( + ROOT::VecOps::RVec> vertices) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + // + for (auto &ivtx : i_vertices) + i_result.push_back(2 * ivtx.ntracks - 3); + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV theta +ROOT::VecOps::RVec> get_theta_SV( + ROOT::VecOps::RVec> vertices) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + + for (auto &ivtx : i_vertices) { + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + i_result.push_back(xyz.Theta()); + } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV phi +ROOT::VecOps::RVec> +get_phi_SV(ROOT::VecOps::RVec> vertices) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + + for (auto &ivtx : i_vertices) { + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + 
i_result.push_back(xyz.Phi()); + } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV relative theta +ROOT::VecOps::RVec> get_relTheta_SV( + ROOT::VecOps::RVec> vertices, + ROOT::VecOps::RVec jets) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + for (unsigned int i = 0; i < jets.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + fastjet::PseudoJet i_jet = jets.at(i); + for (auto &ivtx : i_vertices) { + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + // + i_result.push_back(xyz.Theta() - i_jet.theta()); + } + result.push_back(i_result); + i_result.clear(); + } + // + return result; +} + +// SV relative phi +ROOT::VecOps::RVec> get_relPhi_SV( + ROOT::VecOps::RVec> vertices, + ROOT::VecOps::RVec jets) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + for (unsigned int i = 0; i < jets.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + fastjet::PseudoJet i_jet = jets.at(i); + for (auto &ivtx : i_vertices) { + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + TVector3 jetP(i_jet.px(), i_jet.py(), i_jet.pz()); + // + i_result.push_back(xyz.DeltaPhi(jetP)); + } + result.push_back(i_result); + i_result.clear(); + } + // + return result; +} + +// SV pointing angle wrt PV +ROOT::VecOps::RVec> get_pointingangle_SV( + ROOT::VecOps::RVec> vertices, + FCCAnalysesVertex PV) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + edm4hep::Vector3f r_PV = PV.vertex.position; // in mm + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + for (auto &ivtx : i_vertices) { + double pointangle = 0.; + + ROOT::VecOps::RVec p_tracks = + ivtx.updated_track_momentum_at_vertex; + TVector3 p_sum; + for (TVector3 p_tr : p_tracks) + p_sum += p_tr; + + edm4hep::Vector3f r_vtx = ivtx.vertex.position; // in mm + + TVector3 r_vtx_PV(r_vtx[0] - r_vtx[0], r_vtx[1] - r_PV[1], + r_vtx[2] - r_PV[2]); + + double pDOTr = p_sum.Dot(r_vtx_PV); + double p_mag = p_sum.Mag(); + double r_mag = r_vtx_PV.Mag(); + + pointangle = pDOTr / (p_mag * r_mag); + i_result.push_back(pointangle); + } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV distance from PV in xy +ROOT::VecOps::RVec> +get_dxy_SV(ROOT::VecOps::RVec> vertices, + FCCAnalysesVertex PV) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + TVector3 x_PV(PV.vertex.position[0], PV.vertex.position[1], + PV.vertex.position[2]); + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + // + for (auto &ivtx : i_vertices) { + TVector3 x_vtx(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + TVector3 x_vtx_PV = x_vtx - x_PV; + + i_result.push_back(x_vtx_PV.Perp()); + } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV distance from PV in 3D +ROOT::VecOps::RVec> +get_d3d_SV(ROOT::VecOps::RVec> vertices, + FCCAnalysesVertex PV) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + TVector3 x_PV(PV.vertex.position[0], PV.vertex.position[1], + PV.vertex.position[2]); + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_vertices = vertices.at(i); + // + for (auto &ivtx : i_vertices) { + TVector3 x_vtx(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + TVector3 x_vtx_PV = x_vtx - x_PV; + + i_result.push_back(x_vtx_PV.Mag()); 
+ } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// SV position in 3D +ROOT::VecOps::RVec> get_position_SV( + ROOT::VecOps::RVec> vertices) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + for (unsigned int i = 0; i < vertices.size(); i++) { + ROOT::VecOps::RVec i_result; + ROOT::VecOps::RVec i_vertices = vertices.at(i); + // + for (auto &ivtx : i_vertices) { + TVector3 xyz(ivtx.vertex.position[0], ivtx.vertex.position[1], + ivtx.vertex.position[2]); + i_result.push_back(xyz); + } + result.push_back(i_result); + i_result.clear(); + } + return result; +} + +// V0 pdg +ROOT::VecOps::RVec> +get_pdg_V0(ROOT::VecOps::RVec pdg, ROOT::VecOps::RVec nSV_jet) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + int index = 0; + for (unsigned int i : nSV_jet) { + for (unsigned int j = 0; j < i; j++) { + i_result.push_back(pdg[j + index]); + } + + result.push_back(i_result); + i_result.clear(); + index += i; + } + return result; +} + +// V0 invariant mass +ROOT::VecOps::RVec> +get_invM_V0(ROOT::VecOps::RVec invM, ROOT::VecOps::RVec nSV_jet) { + ROOT::VecOps::RVec> result; + ROOT::VecOps::RVec i_result; + + int index = 0; + for (unsigned int i : nSV_jet) { + for (unsigned int j = 0; j < i; j++) { + i_result.push_back(invM[j + index]); + } + + result.push_back(i_result); + i_result.clear(); + index += i; + } + return result; +} + +} // namespace VertexingUtils + +} // namespace FCCAnalyses diff --git a/analyzers/dataframe/src/myUtils.cc b/analyzers/dataframe/src/myUtils.cc index 0b49ce738e..e348dd41e8 100644 --- a/analyzers/dataframe/src/myUtils.cc +++ b/analyzers/dataframe/src/myUtils.cc @@ -3,14 +3,6 @@ #include #include -#include "awkward/Content.h" -#include "awkward/io/json.h" -#include "awkward/array/NumpyArray.h" -#include "awkward/array/RecordArray.h" -#include "awkward/array/Record.h" -#include "awkward/builder/ArrayBuilder.h" -#include "awkward/builder/ArrayBuilderOptions.h" - #include "FCCAnalyses/myUtils.h" #include "FCCAnalyses/VertexFitterSimple.h" #include "FCCAnalyses/ReconstructedParticle.h" @@ -1224,7 +1216,7 @@ ROOT::VecOps::RVec get_truetrack(ROOT::VecOps::RVec in TVector3 momentum ( tlv.Px(),tlv.Py(),tlv.Pz()); - TVectorD track_param = VertexFitterSimple::XPtoPar( vertexFB, momentum, charge ); + TVectorD track_param = VertexingUtils::XPtoPar( vertexFB, momentum, charge ); track.D0 = track_param[0] * 1e3 ; // from meters to mm @@ -1276,7 +1268,7 @@ ROOT::VecOps::RVec get_pseudotrack(ROOT::VecOps::RVec getFCCAnalysesComposite_track(ROOT::VecO vertex.at(p.vertex).vertex.position.z * norm); TVector3 momentum ( p.particle.Px(),p.particle.Py(),p.particle.Pz()); - TVectorD track_param = VertexFitterSimple::XPtoPar( vertexFB, momentum, p.charge ); + TVectorD track_param = VertexingUtils::XPtoPar( vertexFB, momentum, p.charge ); track.D0 = track_param[0] * 1e3 ; // from meters to mm @@ -1432,98 +1424,6 @@ bool isPV(edm4hep::ReconstructedParticleData recop, ROOT::VecOps::RVec pvin } -build_composite_vertex::build_composite_vertex(int arg_n, int arg_charge, float arg_masslow, float arg_masshigh, float arg_p, bool arg_cc, bool arg_filterPV): m_n(arg_n),m_charge(arg_charge),m_masslow(arg_masslow),m_masshigh(arg_masshigh),m_p(arg_p),m_cc(arg_cc),m_filterPV(arg_filterPV){}; -ROOT::VecOps::RVec -build_composite_vertex::operator() (ROOT::VecOps::RVec recop, - ROOT::VecOps::RVec tracks, - ROOT::VecOps::RVec in, - ROOT::VecOps::RVec pvindex){ - - ROOT::VecOps::RVec result; - - awkward::ArrayBuilder 
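The V0 helpers get_pdg_V0 and get_invM_V0 added above only regroup the flat per-event V0 lists into per-jet lists, advancing a running index by the nSV_jet counts. A plain-Python sketch of that bookkeeping (the PDG codes in the example are illustrative):

    def group_by_jet(values, n_sv_per_jet):
        # split a flat per-event list into one sublist per jet
        result, index = [], 0
        for n in n_sv_per_jet:
            result.append(values[index:index + n])
            index += n
        return result

    # two V0s in the first jet, one in the second
    print(group_by_jet([310, 3122, 310], [2, 1]))   # -> [[310, 3122], [310]]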
builder(awkward::ArrayBuilderOptions(1024, 2.0)); - for (size_t i = 0; i < in.size(); ++i) { - if (get_p(recop.at(in.at(i))) array = builder.snapshot(); - //std::cout << array.get()->tojson(false, 1)< comb = array.get()->combinations(m_n, false, nullptr, awkward::util::Parameters(), 0, 0); - int64_t length = comb->length(); - - - //loop over combinations - for (int64_t i=0;igetitem_at(i); - awkward::Record* recitem = dynamic_cast(item.get()); - std::vector> contentvec = recitem->contents(); - //loop over items of the comb - ROOT::VecOps::RVec tmpvec; - - for (size_t j=0;j(contentvec.at(j).get()); - int64_t lengthnp = numpyraw->length(); - - //loop over the items of the items and get the data (if nested array) - for (int64_t k=0;kgetitem_at(k); - awkward::NumpyArray* npitem = dynamic_cast(item2.get()); - int32_t value = *reinterpret_cast(npitem->data()); - if (k==0)tmpvec.push_back(value); - else tmpvec.push_back(value); - } - //in case the data structure is a simple array (and not an array with a nested array) - if (lengthnp<0){ - int32_t value = *reinterpret_cast(numpyraw->data()); - tmpvec.push_back(value); - } - } - - int charge=0; - for (size_t k=0;k index; - for (size_t k=0;k @@ -1535,25 +1435,6 @@ build_D0::operator() (ROOT::VecOps::RVec rec ROOT::VecOps::RVec result; - /*awkward::ArrayBuilder builder(awkward::ArrayBuilderOptions(1024, 2.0)); - builder.beginlist(); - for (size_t i = 0; i < pions.size(); ++i) builder.integer(pions.at(i)); - builder.endlist(); - builder.beginlist(); - for (size_t i = 0; i < kaons.size(); ++i) builder.integer(kaons.at(i)); - builder.endlist(); - - std::shared_ptr array = builder.snapshot(); - std::cout << array.get()->tojson(false, 1)< comb = array.get()->combinations(2, false, nullptr, awkward::util::Parameters(), 2, 0); - int64_t length = comb->length(); - - for (int64_t i = 0; i < length; ++i){ - std::shared_ptr selection = comb.get()->getitem_at(i); - std::cout << "i="<tojson(false, 1) << std::endl; - }*/ - - for (size_t i = 0; i < pions.size(); ++i){ //pion p cut if (get_p(recop.at(pions.at(i))) get_RP_atVertex(ROOT::Vec } -ROOT::VecOps::RVec awkwardtest(ROOT::VecOps::RVec recop, - ROOT::VecOps::RVec tracks, - ROOT::VecOps::RVec recind, - ROOT::VecOps::RVec mcind, - ROOT::VecOps::RVec mc){ - - ROOT::VecOps::RVec result; - ROOT::VecOps::RVec rp_ind; - ROOT::VecOps::RVec tk_ind; - - ROOT::VecOps::RVec seltracks = VertexingUtils::selTracks(0.,3.,0.,3.)( recop, tracks); - VertexingUtils::FCCAnalysesVertex ThePVertex = VertexFitterSimple::VertexFitter(0,seltracks, tracks); - - int PV_ntrk = ThePVertex.ntracks; - float PV_chi2 = ThePVertex.vertex.chi2; - ROOT::VecOps::RVec reco_ind = ThePVertex.reco_ind; - - std::cout << "ntracks PV " << PV_ntrk << " nreco ind " < reco; - for (size_t i = 0; i < rp_ind.size(); ++i) { - reco.push_back(recop[rp_ind.at(i)]); - } - std::cout <<"beofre"< pions = ReconstructedParticle2MC::selRP_PDG_index(211,true)(recind, mcind, recop, mc) ; - ROOT::VecOps::RVec kaons = ReconstructedParticle2MC::selRP_PDG_index(321,true)(recind, mcind, recop, mc) ; - - std::cout << "n pions " << pions.size() << std::endl; - std::cout << "n kaons " << kaons.size() << std::endl; - - awkward::ArrayBuilder builder(awkward::ArrayBuilderOptions(1024, 2.0)); - for (size_t i = 0; i < rp_ind.size(); ++i) { - builder.beginlist(); - builder.integer(rp_ind.at(i)); - builder.integer(tk_ind.at(i)); - builder.endlist(); - } - - std::shared_ptr array = builder.snapshot(); - std::shared_ptr comb = array.get()->combinations(2, false, nullptr, 
awkward::util::Parameters(), 0, 0); - int64_t length = comb->length(); - - std::cout << "recarray ntracks : " << tracks.size()<< " length 2 comb " << length << std::endl; - - ROOT::VecOps::RVec> vec_rp; - ROOT::VecOps::RVec> vec_tk; - - //loop over combinations - for (int64_t i=0;igetitem_at(i); - awkward::Record* recitem = dynamic_cast(item.get()); - std::vector> contentvec = recitem->contents(); - //loop over items of the comb - ROOT::VecOps::RVec tmpvec_rp; - ROOT::VecOps::RVec tmpvec_tk; - - for (size_t j=0;j(contentvec.at(j).get()); - int64_t lengthnp = numpyraw->length(); - - //loop over the items of the items and get the data (if nested array) - for (int64_t k=0;kgetitem_at(k); - awkward::NumpyArray* npitem = dynamic_cast(item2.get()); - int32_t value = *reinterpret_cast(npitem->data()); - if (k==0)tmpvec_rp.push_back(value); - else tmpvec_tk.push_back(value); - } - //in case the data structure is a simple array (and not an array with a nested array) - if (lengthnp<0){ - int32_t value = *reinterpret_cast(numpyraw->data()); - } - } - - int charge=0; - bool pcut=false; - for (size_t k=0;k0.05)continue; - - - - - ROOT::VecOps::RVec recoparticles; - ROOT::VecOps::RVec thetracks; - for (size_t k=0;k10.)continue; - - std::cout << "SELECTED----------------"< recop, ROOT::VecOps::RVec index){ @@ -2206,126 +1938,6 @@ build_tau23pi::operator() (ROOT::VecOps::RVec } -build_tau23pi_vertexing::build_tau23pi_vertexing(int arg_charge, float arg_masslow, float arg_masshigh, float arg_p, float arg_angle, bool arg_cc, bool arg_filterPV, bool arg_rho):m_charge(arg_charge),m_masslow(arg_masslow),m_masshigh(arg_masshigh),m_p(arg_p),m_angle(arg_angle),m_cc(arg_cc),m_filterPV(arg_filterPV),m_rho(arg_rho){}; -ROOT::VecOps::RVec -build_tau23pi_vertexing::operator() (ROOT::VecOps::RVec recop, - ROOT::VecOps::RVec tracks, - ROOT::VecOps::RVec in, - ROOT::VecOps::RVec pvindex){ - - ROOT::VecOps::RVec result; - - awkward::ArrayBuilder builder(awkward::ArrayBuilderOptions(1024, 2.0)); - for (size_t i = 0; i < in.size(); ++i) { - if (get_p(recop.at(in.at(i))) array = builder.snapshot(); - //std::cout << array.get()->tojson(false, 1)< comb = array.get()->combinations(3, false, nullptr, awkward::util::Parameters(), 0, 0); - int64_t length = comb->length(); - - - //loop over combinations - for (int64_t i=0;igetitem_at(i); - awkward::Record* recitem = dynamic_cast(item.get()); - std::vector> contentvec = recitem->contents(); - //loop over items of the comb - ROOT::VecOps::RVec tmpvec; - - for (size_t j=0;j(contentvec.at(j).get()); - int64_t lengthnp = numpyraw->length(); - - //loop over the items of the items and get the data (if nested array) - for (int64_t k=0;kgetitem_at(k); - awkward::NumpyArray* npitem = dynamic_cast(item2.get()); - int32_t value = *reinterpret_cast(npitem->data()); - if (k==0)tmpvec.push_back(value); - else tmpvec.push_back(value); - } - //in case the data structure is a simple array (and not an array with a nested array) - if (lengthnp<0){ - int32_t value = *reinterpret_cast(numpyraw->data()); - tmpvec.push_back(value); - } - } - int charge=0; - for (size_t k=0;km_masshigh)continue; - - if (m_rho){ - ROOT::VecOps::RVec tmpvec_rho; - if (recop[tmpvec.at(0)].charge!=recop[tmpvec.at(1)].charge){ - tmpvec_rho.push_back(tmpvec.at(0)); - tmpvec_rho.push_back(tmpvec.at(1)); - float mass_rho=build_invmass(recop,tmpvec_rho); - std::cout <<"rho mass comd 1 " << mass_rho << std::endl; - if (mass_rho<0.6)continue; - if (mass_rho>0.9)continue; - } - else if 
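The awkward-array machinery removed in this file was only used to enumerate unordered n-particle index combinations before the charge, momentum, mass and vertex-fit cuts were applied. For illustration, the same enumeration in plain Python (the indices and the value of n are hypothetical):

    from itertools import combinations

    selected = [0, 2, 5, 7]                   # hypothetical reco-particle indices passing the p cut
    for combo in combinations(selected, 3):   # n = 3, as in the tau -> 3 pi builder
        print(combo)                          # charge/mass/vertex-fit cuts would be applied per combo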
(recop[tmpvec.at(0)].charge!=recop[tmpvec.at(2)].charge){ - tmpvec_rho.push_back(tmpvec.at(0)); - tmpvec_rho.push_back(tmpvec.at(2)); - float mass_rho=build_invmass(recop,tmpvec_rho); - std::cout <<"rho mass comd 2 " << mass_rho << std::endl; - if (mass_rho<0.6)continue; - if (mass_rho>0.9)continue; - } - else if (recop[tmpvec.at(1)].charge!=recop[tmpvec.at(2)].charge){ - tmpvec_rho.push_back(tmpvec.at(1)); - tmpvec_rho.push_back(tmpvec.at(2)); - float mass_rho=build_invmass(recop,tmpvec_rho); - std::cout <<"rho mass comd 3 " << mass_rho << std::endl; - if (mass_rho<0.6)continue; - if (mass_rho>0.9)continue; - } - else - std::cout <<"unpexted things happening build_tau23pi::build_tau23pi" < recoparticles; - for (size_t k=0;k10.)continue; - - //std::cout << "SELECTED----------------"< index; - for (size_t k=0;k get_Vertex_thrusthemis_angle(ROOT::VecOps::RVec vertex, diff --git a/bin/fccanalysis b/bin/fccanalysis index c04c5359f1..2d45603d14 100755 --- a/bin/fccanalysis +++ b/bin/fccanalysis @@ -1,33 +1,40 @@ #!/usr/bin/env python3 +import argparse -if __name__ == "__main__": - import argparse - import sys - parser = argparse.ArgumentParser(description='FCCAnalyses parser') - subparsers = parser.add_subparsers(help='types of running modes', dest='command') - parser_init = subparsers.add_parser('init', help="generate a RDataFrame based FCC analysis") - parser_run = subparsers.add_parser('run', help="run a RDataFrame based FCC analysis") - parser_run_final = subparsers.add_parser('final', help="run a RDataFrame based FCC analysis final configuration") - parser_run_plots = subparsers.add_parser('plots', help="run a RDataFrame based FCC analysis plot configuration") +def main(): + parser = argparse.ArgumentParser(description='FCCAnalyses parser') + subparsers = parser.add_subparsers(help='types of running modes', dest='command') + parser_init = subparsers.add_parser('init', help="generate a RDataFrame based FCC analysis") + parser_build = subparsers.add_parser('build', help='build and install local analysis') + parser_pin = subparsers.add_parser('pin', help='pin fccanalyses to the current version of Key4hep stack') + parser_run = subparsers.add_parser('run', help="run a RDataFrame based FCC analysis") + parser_run_final = subparsers.add_parser('final', help="run a RDataFrame based FCC analysis final configuration") + parser_run_plots = subparsers.add_parser('plots', help="run a RDataFrame based FCC analysis plot configuration") + + import config.Parsers as fccpars + fccpars.setup_init_parser(parser_init) + fccpars.setup_build_parser(parser_build) + fccpars.setup_pin_parser(parser_pin) + fccpars.setup_run_parser(parser_run) + fccpars.setup_run_parser_final(parser_run_final) + fccpars.setup_run_parser_plots(parser_run_plots) - from config.Parsers import * - setup_init_parser(parser_init) - setup_run_parser(parser_run) - setup_run_parser_final(parser_run_final) - setup_run_parser_plots(parser_run_plots) + args = parser.parse_args() - args = parser.parse_args() + if args.command == 'init': + from config.FCCAnalysisSetup import setup + setup(parser) + elif args.command == 'build': + from config.build_analysis import build_analysis + build_analysis(parser) + elif args.command == 'pin': + from config.pin_analysis import PinAnalysis + PinAnalysis(parser) + else: + from config.FCCAnalysisRun import run + run(parser) - if len(sys.argv)<3: - print("minimal running requirements : fccanalysis ") - print("running example : fccanalysis run examples/FCCee/higgs/mH-recoil/mumu/analysis_stage1.py") - print("for 
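With the entry point split into subcommands, the typical workflow becomes: fccanalysis build (optionally with --clean-build and --build-threads N) to compile and install the local analyzers, fccanalysis pin to tie the analysis to the currently sourced Key4hep stack, and then fccanalysis run, fccanalysis final and fccanalysis plots with the corresponding configuration script, e.g. fccanalysis run examples/FCCee/higgs/mH-recoil/mumu/analysis_stage1.py.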
running options, try : fccanalysis --help and fccanalysis --help") - sys.exit(3) - if args.command == 'init': - from config.FCCAnalysisSetup import setup - setup(parser) - else: - from config.FCCAnalysisRun import run - run(parser) +if __name__ == "__main__": + main() diff --git a/cmake/FindDelphes.cmake b/cmake/FindDelphes.cmake new file mode 100644 index 0000000000..9c578d3368 --- /dev/null +++ b/cmake/FindDelphes.cmake @@ -0,0 +1,67 @@ +set(searchpath + $ENV{DELPHES_DIR} + $ENV{DELPHES_DIR}/external + $ENV{DELPHES_DIR}/lib + $ENV{DELPHES_DIR}/include + $ENV{DELPHES_DIR}/include/TrackCovariance +# $ENV{DELPHES} +# $ENV{DELPHES}/external +# $ENV{DELPHES}/lib +# $ENV{DELPHES}/include + ) + + +find_library(DELPHES_LIBRARY + NAMES Delphes delphes + HINTS ${searchpath} + PATH_SUFFIXES lib) + +find_path(DELPHES_INCLUDE_DIR + # NAMES DelphesClasses.h Delphes.h + NAMES classes/DelphesClasses.h modules/Delphes.h external/ExRootAnalysis + HINTS ${searchpath} + PATH_SUFFIXES include) + +find_path(DELPHES_EXTERNALS_INCLUDE_DIR + # NAMES DelphesClasses.h Delphes.h + NAMES ExRootAnalysis/ExRootConfReader.h + HINTS ${searchpath} + PATH_SUFFIXES include +) + +find_path(DELPHES_EXTERNALS_TKCOV_INCLUDE_DIR + # NAMES DelphesClasses.h Delphes.h + NAMES TrkUtil.h + HINTS ${searchpath} + PATH_SUFFIXES include +) + +# Necessary to run the tests +find_path(DELPHES_BINARY_DIR + NAMES DelphesROOT + HINTS ${DELPHES_INCLUDE_DIR}/../bin +) + +find_path(DELPHES_CARDS_DIR + NAMES delphes_card_IDEA.tcl + HINTS ${searchpath} + PATH_SUFFIXES cards) + +unset(searchpath) + +set(DELPHES_INCLUDE_DIRS ${DELPHES_INCLUDE_DIR} ${DELPHES_EXTERNALS_INCLUDE_DIR}) +set(DELPHES_EXTERNALS_INCLUDE_DIRS ${DELPHES_EXTERNALS_INCLUDE_DIR}) +set(DELPHES_EXTERNALS_TKCOV_INCLUDE_DIRS ${DELPHES_EXTERNALS_TKCOV_INCLUDE_DIR}) +set(DELPHES_LIBRARIES ${DELPHES_LIBRARY}) + +# Delphes does not offer an obvious version indicator, but we need to know +# whether the TrackCovariance module is available or not. So here we simply +# check whether the corresponding header is installed +find_file(DELPHES_TRACK_COV_HEADER modules/TrackCovariance.h PATHS ${DELPHES_INCLUDE_DIRS} NO_DEFAULT_PATHS) + +include(FindPackageHandleStandardArgs) +# handle the QUIETLY and REQUIRED arguments and set DELPHES_FOUND to TRUE +# if all listed variables are TRUE +find_package_handle_standard_args(Delphes DEFAULT_MSG DELPHES_INCLUDE_DIR DELPHES_EXTERNALS_INCLUDE_DIR DELPHES_EXTERNALS_TKCOV_INCLUDE_DIR DELPHES_LIBRARY) + +mark_as_advanced(DELPHES_INCLUDE_DIR DELPHES_EXTERNALS_INCLUDE_DIR DELPHES_EXTERNALS_TKCOV_INCLUDE_DIR DELPHES_LIBRARY DELPHES_BINARY_DIR DELPHES_TRACK_COV_HEADER) diff --git a/config/FCCAnalysisRun.py b/config/FCCAnalysisRun.py index a7e89faf41..1e8f5d6773 100644 --- a/config/FCCAnalysisRun.py +++ b/config/FCCAnalysisRun.py @@ -10,14 +10,7 @@ from config.common_defaults import deffccdicts import datetime -print ("----> Load cxx analyzers from libFCCAnalyses... ",) -ROOT.gSystem.Load("libFCCAnalyses") -ROOT.gErrorIgnoreLevel = ROOT.kFatal -#Is this still needed?? 
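getElement treats several analysis.py attributes as optional and falls back to the defaults quoted in the messages above, so a configuration can simply omit or override them. A sketch of the optional settings (values shown are the defaults returned by getElement):

    # optional settings in analysis.py; when absent, getElement returns these defaults
    batchQueue   = "workday"                 # HTCondor job flavour for batch submission
    compGroup    = "group_u_FCC.local_gen"   # accounting group for batch submission
    geometryFile = ""                        # optional detector geometry xml
    readoutName  = ""                        # optional readout name for the geometry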
01/04/2022 still to be the case -_fcc = ROOT.dummyLoader - - -date=datetime.datetime.fromtimestamp(datetime.datetime.now().timestamp()).strftime('%Y-%m-%d_%H-%M-%S') +DATE = datetime.datetime.fromtimestamp(datetime.datetime.now().timestamp()).strftime('%Y-%m-%d_%H-%M-%S') #__________________________________________________________ def getElement(rdfModule, element, isFinal=False): @@ -58,12 +51,12 @@ def getElement(rdfModule, element, isFinal=False): return "" elif element=='batchQueue': - print('The variable <{}> is optional in your analysys.py file, return default value workday'.format(element)) + print('The variable <{}> is optional in your analysis.py file, return default value workday'.format(element)) if isFinal: print('The option <{}> is not available in final analysis'.format(element)) return "workday" elif element=='compGroup': - print('The variable <{}> is optional in your analysys.py file, return default value group_u_FCC.local_gen'.format(element)) + print('The variable <{}> is optional in your analysis.py file, return default value group_u_FCC.local_gen'.format(element)) if isFinal: print('The option <{}> is not available in final analysis'.format(element)) return "group_u_FCC.local_gen" @@ -83,7 +76,7 @@ def getElement(rdfModule, element, isFinal=False): return "" elif element=='testFile': - print('The variable <{}> is optional in your analysys.py file, return default file'.format(element)) + print('The variable <{}> is optional in your analysis.py file, return default file'.format(element)) if isFinal: print('The option <{}> is not available in final analysis'.format(element)) return "root://eospublic.cern.ch//eos/experiment/fcc/ee/generation/DelphesEvents/spring2021/IDEA/p8_ee_Zbb_ecm91_EvtGen_Bc2TauNuTAUHADNU/events_131527278.root" @@ -148,12 +141,12 @@ def getElement(rdfModule, element, isFinal=False): else: print('The option <{}> is not available in presel analysis'.format(element)) elif element=='geometryFile': - print('The variable <{}> is optional in your analysys.py file, return default value empty string'.format(element)) + print('The variable <{}> is optional in your analysis.py file, return default value empty string'.format(element)) if isFinal: print('The option <{}> is not available in final analysis'.format(element)) return "" elif element=='readoutName': - print('The variable <{}> is optional in your analysys.py file, return default value empty string'.format(element)) + print('The variable <{}> is optional in your analysis.py file, return default value empty string'.format(element)) if isFinal: print('The option <{}> is not available in final analysis'.format(element)) return "" @@ -389,10 +382,21 @@ def runRDF(rdfModule, inputlist, outFile, nevt, args): #__________________________________________________________ def sendToBatch(rdfModule, chunkList, process, analysisFile): localDir = os.environ["LOCAL_DIR"] - logDir = localDir+"/BatchOutputs/{}/{}".format(date,process) + logDir = localDir+"/BatchOutputs/{}/{}".format(DATE, process) if not os.path.exists(logDir): os.system("mkdir -p {}".format(logDir)) + # Making sure the FCCAnalyses libraries are compiled and installed + try: + subprocess.check_output(['make', 'install'], + cwd=localDir+'/build', + stderr=subprocess.DEVNULL + ) + except subprocess.CalledProcessError as e: + print("----> The FCCanalyses libraries are not properly build and installed!") + print('----> Aborting job submission...') + sys.exit(3) + outputDir = getElement(rdfModule, "outputDir") outputDirEos = getElement(rdfModule, 
"outputDirEos") eosType = getElement(rdfModule, "eosType") @@ -416,10 +420,7 @@ def sendToBatch(rdfModule, chunkList, process, analysisFile): subprocess.getstatusoutput('chmod 777 %s'%(frunname)) frun.write('#!/bin/bash\n') - frun.write('source /cvmfs/sw.hsf.org/key4hep/setup.sh\n') - frun.write('export PYTHONPATH=$LOCAL_DIR:$PYTHONPATH\n') - frun.write('export LD_LIBRARY_PATH=$LOCAL_DIR/install/lib:$LD_LIBRARY_PATH\n') - frun.write('export ROOT_INCLUDE_PATH=$LOCAL_DIR/install/include/FCCAnalyses:$ROOT_INCLUDE_PATH\n') + frun.write('source ' + localDir + '/setup.sh\n') #add userBatchConfig if any if userBatchConfig!="": @@ -434,9 +435,9 @@ def sendToBatch(rdfModule, chunkList, process, analysisFile): frun.write('cd job{}_chunk{}\n'.format(process,ch)) if not os.path.isabs(outputDir): - frun.write('$LOCAL_DIR/bin/fccanalysis run {} --batch --output {}chunk{}.root --files-list '.format(analysisFile, outputDir, ch)) + frun.write(localDir + '/bin/fccanalysis run {} --batch --output {}chunk{}.root --files-list '.format(analysisFile, outputDir, ch)) else: - frun.write('$LOCAL_DIR/bin/fccanalysis run {} --batch --output {}{}/chunk{}.root --files-list '.format(analysisFile, outputDir, process,ch)) + frun.write(localDir + '/bin/fccanalysis run {} --batch --output {}{}/chunk{}.root --files-list '.format(analysisFile, outputDir, process,ch)) for ff in range(len(chunkList[ch])): frun.write(' %s'%(chunkList[ch][ff])) @@ -468,7 +469,7 @@ def sendToBatch(rdfModule, chunkList, process, analysisFile): frun_condor.write('Log = {}/condor_job.{}.$(ClusterId).$(ProcId).log\n'.format(logDir,process)) frun_condor.write('Output = {}/condor_job.{}.$(ClusterId).$(ProcId).out\n'.format(logDir,process)) frun_condor.write('Error = {}/condor_job.{}.$(ClusterId).$(ProcId).error\n'.format(logDir,process)) - frun_condor.write('getenv = True\n') + frun_condor.write('getenv = False\n') frun_condor.write('environment = "LS_SUBCWD={}"\n'.format(logDir)) # not sure frun_condor.write('requirements = ( (OpSysAndVer =?= "CentOS7") && (Machine =!= LastRemoteHost) && (TARGET.has_avx2 =?= True) )\n') frun_condor.write('on_exit_remove = (ExitBySignal == False) && (ExitCode == 0)\n') @@ -486,9 +487,9 @@ def sendToBatch(rdfModule, chunkList, process, analysisFile): #__________________________________________________________ def addeosType(fileName): sfileName=fileName.split('/') - if sfileName[1]=='experiment': + if sfileName[2]=='experiment': fileName='root://eospublic.cern.ch/'+fileName - elif sfileName[1]=='user' or sfileName[1].contains('home-'): + elif sfileName[2]=='user' or sfileName[2].contains('home-'): fileName='root://eosuser.cern.ch/'+fileName else: print('unknown eos type, please check with developers as it might not run with best performances') @@ -503,7 +504,7 @@ def runLocal(rdfModule, fileList, args): nevents_local = 0 for fileName in fileList: - if fileName.split('/')[0]=='eos': + if fileName.split('/')[1]=='eos': fileName=addeosType(fileName) fileListRoot.push_back(fileName) @@ -570,10 +571,9 @@ def runLocal(rdfModule, fileList, args): if args.bench: import json - analysis_path = args.pathToAnalysisScript.rsplit('/', 1)[0] analysis_name = getElement(rdfModule, 'analysisName') if not analysis_name: - analysis_name = analysis_path + analysis_name = args.pathToAnalysisScript bench_time = {} bench_time['name'] = 'Time spent running the analysis: ' @@ -581,7 +581,7 @@ def runLocal(rdfModule, fileList, args): bench_time['unit'] = 'Seconds' bench_time['value'] = elapsed_time bench_time['range'] = 10 - bench_time['extra'] = 
'Analysis path: ' + analysis_path + bench_time['extra'] = 'Analysis path: ' + args.pathToAnalysisScript saveBenchmark('benchmarks_smaller_better.json', bench_time) bench_evt_per_sec = {} @@ -590,7 +590,7 @@ def runLocal(rdfModule, fileList, args): bench_evt_per_sec['unit'] = 'Evt/s' bench_evt_per_sec['value'] = nevents_local / elapsed_time bench_time['range'] = 1000 - bench_time['extra'] = 'Analysis path: ' + analysis_path + bench_time['extra'] = 'Analysis path: ' + args.pathToAnalysisScript saveBenchmark('benchmarks_bigger_better.json', bench_evt_per_sec) @@ -867,8 +867,28 @@ def runFinal(rdfModule): histos = [] for v in histoList: - model = ROOT.RDF.TH1DModel(v, ";{};".format(histoList[v]["title"]), histoList[v]["bin"], histoList[v]["xmin"], histoList[v]["xmax"]) - histos.append(df_cut.Histo1D(model,histoList[v]["name"])) + if "name" in histoList[v]: # default 1D histogram + model = ROOT.RDF.TH1DModel(v, ";{};".format(histoList[v]["title"]), histoList[v]["bin"], histoList[v]["xmin"], histoList[v]["xmax"]) + histos.append(df_cut.Histo1D(model,histoList[v]["name"])) + elif "cols" in histoList[v]: # multi dim histogram (1, 2 or 3D) + cols = histoList[v]['cols'] + bins = histoList[v]['bins'] + bins_unpacked = tuple([i for sub in bins for i in sub]) + if len(bins) != len(cols): + print ('----> Amount of columns should be equal to the amount of bin configs.') + sys.exit(3) + if len(cols) == 1: + histos.append(df_cut.Histo1D((v, "", *bins_unpacked), cols[0])) + elif len(cols) == 2: + histos.append(df_cut.Histo2D((v, "", *bins_unpacked), cols[0], cols[1])) + elif len(cols) == 3: + histos.append(df_cut.Histo3D((v, "", *bins_unpacked), cols[0], cols[1], cols[2])) + else: + print ('----> Only 1, 2 or 3D histograms supported.') + sys.exit(3) + else: + print ('----> Error parsing the histogram config. Provide either name or cols.') + sys.exit(3) histos_list.append(histos) if doTree: @@ -986,7 +1006,6 @@ def runFinal(rdfModule): #__________________________________________________________ def runPlots(analysisFile): - import config.doPlots as dp dp.run(analysisFile) @@ -1022,7 +1041,7 @@ def setup_run_parser(parser): publicOptions.add_argument("--rerunfailed", action='store_true', help="Rerun failed jobs", default=False) publicOptions.add_argument("--jobdir", help="Specify the batch job directory", type=str, default="output.root") publicOptions.add_argument("--eloglevel", help="Specify the RDataFrame ELogLevel", type=str, default="kUnset", choices = ['kUnset','kFatal','kError','kWarning','kInfo','kDebug']) - + internalOptions = parser.add_argument_group('\033[4m\033[1m\033[91m Internal options, NOT FOR USERS\033[0m') internalOptions.add_argument("--batch", action='store_true', help="Submit on batch", default=False) @@ -1046,45 +1065,74 @@ def run(mainparser, subparser=None): print("specify a valid analysis script in the command line arguments") sys.exit(3) + print ("----> Info: Loading analyzers from libFCCAnalyses... ",) + ROOT.gSystem.Load("libFCCAnalyses") + ROOT.gErrorIgnoreLevel = ROOT.kFatal + #Is this still needed?? 
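runFinal now accepts two histogram definitions: the original "name" form for a 1D histogram of a single column, and a "cols"/"bins" form where each column gets a (nbins, xmin, xmax) tuple and 1, 2 or 3 columns yield a TH1/TH2/TH3. A sketch of an analysis_final.py histoList mixing both forms (column names are borrowed from the mH-recoil example further down in this patch):

    histoList = {
        # classic 1D definition
        "mz": {"name": "Zcand_m", "title": "m_{Z} [GeV]", "bin": 40, "xmin": 80, "xmax": 100},
        # multi-dimensional definition: one (nbins, xmin, xmax) tuple per column
        "mz_recoil_2D": {"cols": ["Zcand_m", "Zcand_recoil_m"],
                         "title": "m_{Z} - leptonic recoil [GeV]",
                         "bins": [(40, 80, 100), (100, 120, 140)]},
    }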
01/04/2022 still to be the case + _fcc = ROOT.dummyLoader + #set the RDF ELogLevel try: verbosity = ROOT.Experimental.RLogScopedVerbosity(ROOT.Detail.RDF.RDFLogChannel(), getattr(ROOT.Experimental.ELogLevel,args.eloglevel)) except AttributeError: pass #load the analysis - analysisFile=os.path.abspath(analysisFile) - print ("--------------loading analysis file ",analysisFile) + analysisFile = os.path.abspath(analysisFile) + print('----> Info: Loading analysis file:') + print(' ' + analysisFile) rdfSpec = importlib.util.spec_from_file_location("rdfanalysis", analysisFile) rdfModule = importlib.util.module_from_spec(rdfSpec) rdfSpec.loader.exec_module(rdfModule) - try: - args.command - if args.command == "run": runStages(args, rdfModule, args.preprocess, analysisFile) - elif args.command == "final": runFinal(rdfModule) - elif args.command == "plots": runPlots(analysisFile) + if hasattr(args, 'command'): + if args.command == "run": + try: + runStages(args, rdfModule, args.preprocess, analysisFile) + except Exception as excp: + print('----> Error: During the execution of the stage file:') + print(' ' + analysisFile) + print(' exception occurred:') + print(excp) + elif args.command == "final": + try: + runFinal(rdfModule) + except Exception as excp: + print('----> Error: During the execution of the final stage file:') + print(' ' + analysisFile) + print(' exception occurred:') + print(excp) + elif args.command == "plots": + try: + runPlots(analysisFile) + except Exception as excp: + print('----> Error: During the execution of the plots file:') + print(' ' + analysisFile) + print(' exception occurred:') + print(excp) return - except AttributeError: - print("============running the old way") + print('----> Info: Running the old way...') + print(' This way of running the analysis is deprecated and will') + print(' be removed in the next release!') - #below is legacy using the old way of runnig with options in "python config/FCCAnalysisRun.py analysis.py --options - #check if this is final analysis + # below is legacy using the old way of runnig with options in + # "python config/FCCAnalysisRun.py analysis.py --options check if this is + # final analysis if args.final: if args.plots: - print ('----> Can not have --plots with --final, exit') + print('----> Can not have --plots with --final, exit') sys.exit(3) if args.preprocess: - print ('----> Can not have --preprocess with --final, exit') + print('----> Can not have --preprocess with --final, exit') sys.exit(3) runFinal(rdfModule) elif args.plots: if args.final: - print ('----> Can not have --final with --plots, exit') + print('----> Can not have --final with --plots, exit') sys.exit(3) if args.preprocess: - print ('----> Can not have --preprocess with --plots, exit') + print('----> Can not have --preprocess with --plots, exit') sys.exit(3) runPlots(analysisFile) @@ -1094,10 +1142,10 @@ def run(mainparser, subparser=None): else: if args.preprocess: if args.plots: - print ('----> Can not have --plots with --preprocess, exit') + print('----> Can not have --plots with --preprocess, exit') sys.exit(3) if args.final: - print ('----> Can not have --final with --preprocess, exit') + print('----> Can not have --final with --preprocess, exit') sys.exit(3) runStages(args, rdfModule, args.preprocess, analysisFile) diff --git a/config/Parsers.py b/config/Parsers.py index d1788bdc00..0b46753f4d 100644 --- a/config/Parsers.py +++ b/config/Parsers.py @@ -7,6 +7,32 @@ def setup_init_parser(parser): publicOptions.add_argument('--standalone', action='store_true', 
help="also add CMake directive to build standalone package", default=False) publicOptions.add_argument('--output-dir', help='output directory where the analysis package will be written') +def setup_build_parser(parser): + publicOptions = parser.add_argument_group('User build options') + publicOptions.add_argument('--clean-build', + action='store_true', + default=False, + help='do a clean build') + publicOptions.add_argument('--build-threads', + type=int, + default=1, + help='bumber of threads when building (equivalen to `make -j`)') + +def setup_pin_parser(parser): + publicOptions = parser.add_argument_group('User pin options') + publicOptions.add_argument('-c', '--clear', + action='store_true', + default=False, + help='clear analysis pin') + publicOptions.add_argument('-f', '--force', + action='store_true', + default=False, + help='force recreate analysis pin') + publicOptions.add_argument('-s', '--show', + action='store_true', + default=False, + help='show pinned stack') + def setup_run_parser(parser): publicOptions = parser.add_argument_group('User options') publicOptions.add_argument("pathToAnalysisScript", help="path to analysis script") diff --git a/config/build_analysis.py b/config/build_analysis.py new file mode 100644 index 0000000000..ef33cf6184 --- /dev/null +++ b/config/build_analysis.py @@ -0,0 +1,69 @@ +''' +The module takes care of building FCCAnalyses +''' + +import os +import sys +import subprocess +import pathlib +import shutil + + +def run_subprocess(command, run_dir): + ''' + Run subprocess in specified directory. + Check only the return value, otherwise keep the subprocess connected to + stdin/stout/stderr. + ''' + try: + proc = subprocess.Popen(command, cwd=run_dir) + status = proc.wait() + + if status != 0: + print('----> Error encountered!') + print(' Aborting...') + sys.exit(3) + + except KeyboardInterrupt: + print('----> Aborting...') + sys.exit(0) + + +def build_analysis(mainparser): + ''' + Main build steering function + ''' + args, _ = mainparser.parse_known_args() + + if 'LOCAL_DIR' not in os.environ: + print('----> FCCAnalyses environment not set up correctly!') + print(' Aborting...') + sys.exit(3) + + local_dir = os.environ.get('LOCAL_DIR') + build_path = pathlib.Path(local_dir + '/build') + install_path = pathlib.Path(local_dir + '/install') + + print('----> Building analysis located in:') + print(' ' + local_dir) + + if args.clean_build: + print('----> Clearing build and install directories...') + if build_path.is_dir(): + shutil.rmtree(build_path) + if install_path.is_dir(): + shutil.rmtree(install_path) + + if not build_path.is_dir(): + print('----> Creating build directory...') + os.makedirs(build_path) + + run_subprocess(['cmake', '-DCMAKE_INSTALL_PREFIX=../install', '..'], + local_dir + '/build') + + if not install_path.is_dir(): + print('----> Creating install directory...') + os.makedirs(install_path) + + run_subprocess(['make', '-j{}'.format(args.build_threads), 'install'], + local_dir + '/build') diff --git a/config/doPlots.py b/config/doPlots.py index 0873a8c4cf..51ebb1578e 100644 --- a/config/doPlots.py +++ b/config/doPlots.py @@ -17,8 +17,18 @@ def sortedDictValues(dic): keys = sorted(dic) return [dic[key] for key in keys] +def formatStatUncHist(hists, name, hstyle=3254): + hTot = hists[0].Clone(name + "_unc") + for h in hists[1:]: + hTot.Add(h) + hTot.SetFillColor(ROOT.kBlack) + hTot.SetMarkerSize(0) + hTot.SetLineWidth(0) + hTot.SetFillStyle(hstyle) + return hTot + #__________________________________________________________ -def 
mapHistos(var, label, sel, param): +def mapHistos(var, label, sel, param, rebin): print ('run plots for var:{} label:{} selection:{}'.format(var,label,sel)) signal=param.plots[label]['signal'] backgrounds=param.plots[label]['backgrounds'] @@ -42,6 +52,7 @@ def mapHistos(var, label, sel, param): param.scaleSig=scaleSig print ('scaleSig ',scaleSig) hh.Scale(param.intLumi*scaleSig) + hh.Rebin(rebin) if len(hsignal[s])==0: hsignal[s].append(hh) @@ -62,6 +73,7 @@ def mapHistos(var, label, sel, param): h=tf.Get(var) hh = copy.deepcopy(h) hh.Scale(param.intLumi) + hh.Rebin(rebin) if len(hbackgrounds[b])==0: hbackgrounds[b].append(hh) else: @@ -79,7 +91,7 @@ def mapHistos(var, label, sel, param): return hsignal,hbackgrounds #__________________________________________________________ -def runPlots(var,sel,param,hsignal,hbackgrounds,extralab,splitLeg): +def runPlots(var,sel,param,hsignal,hbackgrounds,extralab,splitLeg,plotStatUnc): ###Below are settings for separate signal and background legends if(splitLeg): @@ -164,22 +176,22 @@ def runPlots(var,sel,param,hsignal,hbackgrounds,extralab,splitLeg): param.scaleSig=scaleSig if 'AAAyields' in var: - drawStack(var, 'events', leg, lt, rt, param.formats, param.outdir+"/"+sel, False , True , histos, colors, param.ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, leg2, yields) + drawStack(var, 'events', leg, lt, rt, param.formats, param.outdir+"/"+sel, False , True , histos, colors, param.ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, leg2, yields, plotStatUnc) return if 'stack' in param.stacksig: if 'lin' in param.yaxis: - drawStack(var+"_stack_lin", 'events', leg, lt, rt, param.formats, param.outdir+"/"+sel, False , True , histos, colors, param.ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, leg2, yields) + drawStack(var+"_stack_lin", 'events', leg, lt, rt, param.formats, param.outdir+"/"+sel, False , True , histos, colors, param.ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, leg2, yields, plotStatUnc) if 'log' in param.yaxis: - drawStack(var+"_stack_log", 'events', leg, lt, rt, param.formats, param.outdir+"/"+sel, True , True , histos, colors, param.ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, leg2, yields) + drawStack(var+"_stack_log", 'events', leg, lt, rt, param.formats, param.outdir+"/"+sel, True , True , histos, colors, param.ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, leg2, yields, plotStatUnc) if 'lin' not in param.yaxis and 'log' not in param.yaxis: print ('unrecognised option in formats, should be [\'lin\',\'log\']'.format(param.formats)) if 'nostack' in param.stacksig: if 'lin' in param.yaxis: - drawStack(var+"_nostack_lin", 'events', leg, lt, rt, param.formats, param.outdir+"/"+sel, False , False , histos, colors, param.ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, leg2, yields) + drawStack(var+"_nostack_lin", 'events', leg, lt, rt, param.formats, param.outdir+"/"+sel, False , False , histos, colors, param.ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, leg2, yields, plotStatUnc) if 'log' in param.yaxis: - drawStack(var+"_nostack_log", 'events', leg, lt, rt, param.formats, param.outdir+"/"+sel, True , False , histos, colors, param.ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, leg2, yields) + drawStack(var+"_nostack_log", 'events', leg, lt, rt, param.formats, param.outdir+"/"+sel, True , False , histos, colors, param.ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, leg2, yields, plotStatUnc) if 'lin' not in param.yaxis and 'log' not in param.yaxis: print ('unrecognised option 
in formats, should be [\'lin\',\'log\']'.format(param.formats)) if 'stack' not in param.stacksig and 'nostack' not in param.stacksig: @@ -187,7 +199,7 @@ def runPlots(var,sel,param,hsignal,hbackgrounds,extralab,splitLeg): #_____________________________________________________________________________________________________________ -def drawStack(name, ylabel, legend, leftText, rightText, formats, directory, logY, stacksig, histos, colors, ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, legend2=None, yields=None): +def drawStack(name, ylabel, legend, leftText, rightText, formats, directory, logY, stacksig, histos, colors, ana_tex, extralab, scaleSig, customLabel, nsig, nbkg, legend2=None, yields=None, plotStatUnc=False): canvas = ROOT.TCanvas(name, name, 600, 600) canvas.SetLogy(logY) @@ -264,6 +276,10 @@ def drawStack(name, ylabel, legend, leftText, rightText, formats, directory, log if not stacksig: hStack.Draw("hist") + if plotStatUnc: + hUnc_bkg = formatStatUncHist(hStack.GetHists(), "bkg_only") # bkg-only uncertainty + hUnc_bkg.Draw("E2 SAME") + # define stacked signal histo hStackSig = ROOT.THStack("hstacksig","") @@ -279,11 +295,19 @@ def drawStack(name, ylabel, legend, leftText, rightText, formats, directory, log if stacksig: hStack.Draw("hist") + if plotStatUnc: + hUnc_sig_bkg = formatStatUncHist(hStack.GetHists(), "sig_bkg") # sig+bkg uncertainty + hUnc_sig_bkg.Draw("E2 SAME") + xlabel = histos[0].GetXaxis().GetTitle() if (not stacksig) and nbkg==0: hStackSig.Draw("hist nostack") + if plotStatUnc: + for sHist in hStackSig.GetHists(): + hUnc_sig = formatStatUncHist([sHist], "sig", 3245) # sigs uncertainty + hUnc_sig.Draw("E2 SAME") hStackSig.GetXaxis().SetTitle(xlabel) hStackSig.GetYaxis().SetTitle(ylabel) @@ -355,7 +379,11 @@ def drawStack(name, ylabel, legend, leftText, rightText, formats, directory, log if 'AAAyields' not in name and nbkg>0: hStackSig.Draw("same hist nostack") else: - hStackSig.Draw("hist nostack") + hStackSig.Draw("hist nostack") + if plotStatUnc: + for sHist in hStackSig.GetHists(): + hUnc_sig = formatStatUncHist([sHist], "sig", 3245) # sigs uncertainty + hUnc_sig.Draw("E2 SAME") legend.Draw() if legend2 != None: @@ -513,12 +541,17 @@ def run(paramFile): splitLeg = param.splitLeg else: splitLeg = False + + if hasattr(param, "plotStatUnc"): + plotStatUnc = param.plotStatUnc + else: + plotStatUnc = False counter=0 - for var in param.variables: + for iVar,var in enumerate(param.variables): for label, sels in param.selections.items(): for sel in sels: - hsignal,hbackgrounds=mapHistos(var,label,sel, param) - runPlots(var+"_"+label,sel,param,hsignal,hbackgrounds,param.extralabel[sel],splitLeg) - if counter==0: runPlots("AAAyields_"+label,sel,param,hsignal,hbackgrounds,param.extralabel[sel],splitLeg) + hsignal,hbackgrounds=mapHistos(var,label,sel, param, rebin=param.rebin[iVar] if hasattr(param, "rebin") and len(param.rebin) == len(param.variables) else 1) + runPlots(var+"_"+label,sel,param,hsignal,hbackgrounds,param.extralabel[sel],splitLeg,plotStatUnc) + if counter==0: runPlots("AAAyields_"+label,sel,param,hsignal,hbackgrounds,param.extralabel[sel],splitLeg,plotStatUnc) counter+=1 diff --git a/config/pin_analysis.py b/config/pin_analysis.py new file mode 100644 index 0000000000..349e0b89e7 --- /dev/null +++ b/config/pin_analysis.py @@ -0,0 +1,105 @@ +''' +The module pins/unpins FCCAnalyses to the current version of the Key4hep stack +''' + +import os +import sys +import pathlib + + +class PinAnalysis: + ''' + Pin/unpin FCCAnalyses to the current version of 
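The optional per-variable rebin list is only honoured when its length matches the variables list; otherwise a factor of 1 is used. A plain-Python sketch of the pairing done in run() (variable names as in the mH-recoil plots configuration below):

    variables = ['mz', 'mz_zoom', 'leptonic_recoil_m', 'leptonic_recoil_m_zoom', 'leptonic_recoil_m_zoom2']
    rebin = [1, 1, 1, 1, 2]     # uniform rebin factor per variable
    for i_var, var in enumerate(variables):
        factor = rebin[i_var] if len(rebin) == len(variables) else 1
        print(var, '-> Rebin({})'.format(factor))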
the Key4hep stack + ''' + def __init__(self, mainparser): + ''' + Setup analysis pinning + ''' + + if 'LOCAL_DIR' not in os.environ: + print('----> Error: FCCAnalyses environment not set up correctly!') + print(' Aborting...') + sys.exit(3) + + self.local_dir = os.environ.get('LOCAL_DIR') + self.pin_path = pathlib.Path(self.local_dir + '/.fccana/stackpin') + + self.args, _ = mainparser.parse_known_args() + + if self.args.show: + self.show_pin() + + if self.args.clear: + self.unpin_analysis() + else: + self.pin_analysis() + + def show_pin(self): + ''' + Show current pin + ''' + if not self.pin_path.is_file(): + print('----> Info: Analysis not pinned.') + sys.exit(0) + + with open(self.pin_path, 'r') as pinfile: + lines = pinfile.readlines() + + if len(lines) != 1: + print('----> Error: Analysis pin file malformed!') + sys.exit(3) + + stack_path = lines[0] + + print('----> Analysis pinned to the following Key4hep stack:') + print(' ' + stack_path) + + sys.exit(0) + + def unpin_analysis(self): + ''' + Unpin analysis from any Key4hep stack version + ''' + if not self.pin_path.is_file(): + print('----> Warning: Analysis pin file not found!') + sys.exit(0) + + print('----> Unpinning analysis located in:') + print(' ' + self.local_dir) + self.pin_path.unlink() + + with os.scandir(os.path.dirname(self.pin_path)) as item: + if any(item): + sys.exit(0) + os.rmdir(os.path.dirname(self.pin_path)) + + sys.exit(0) + + def pin_analysis(self): + ''' + Pin analysis to the Key4hep stack version + ''' + if self.pin_path.is_file() and not self.args.force: + print('----> Warning: Analysis pin file already created!') + print(' Use "--force" flag to overwrite current pin.') + print(' Aborting...') + sys.exit(0) + + if 'KEY4HEP_STACK' not in os.environ: + print('----> Error: FCCAnalyses environment not set up correctly!') + print(' Aborting...') + sys.exit(3) + + stack_path = os.environ.get('KEY4HEP_STACK') + + print('----> Pinning analysis located in:') + print(' ' + self.local_dir) + print(' to Key4hep stack:') + print(' ' + stack_path) + + os.makedirs(os.path.dirname(self.pin_path), exist_ok=True) + + with open(self.pin_path, 'w') as pinfile: + pinfile.write(stack_path + '\n') + + sys.exit(0) diff --git a/examples/FCCee/fullSim/caloNtupleizer/analysis.py b/examples/FCCee/fullSim/caloNtupleizer/analysis.py index 42cf483a34..9d4e8aa659 100644 --- a/examples/FCCee/fullSim/caloNtupleizer/analysis.py +++ b/examples/FCCee/fullSim/caloNtupleizer/analysis.py @@ -20,7 +20,7 @@ def str2bool(v): parser = argparse.ArgumentParser() -parser.add_argument("-inputFiles", default = '/afs/cern.ch/user/b/brfranco/work/public/Fellow/FCCSW/key4hep_trial3/FCCSW/Examples/options/output_fullCalo_SimAndDigi_withCluster_MagneticField_False_pMin_10000_MeV_ThetaMinMax_45_135_pdgId_11_pythiaFalse.root', help = "Input rootfiles (can be a single file or a regex)", type = str) +parser.add_argument("-inputFiles", default = '/eos/user/b/brfranco/rootfile_storage/220618_gamma_flat_1_100_noNoise/fccsw_output_pdgID_22_pMin_1000_pMax_100000_thetaMin_50_thetaMax_130.root', help = "Input rootfiles (can be a single file or a regex)", type = str) parser.add_argument("-outputFolder", default = os.path.join("outputs", date.today().strftime("%y%m%d")), help = "Output folder for the rootfiles", type = str) parser.add_argument("-storeCellBranches", default = True, help="Whether or not to store cell information", type = str2bool) parser.add_argument("-cellBranchNames", default = ["ECalBarrelPositionedCells"], help="Name of the cell branch in the input 
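The pin written by fccanalysis pin is nothing more than a one-line file holding the Key4hep stack path under <LOCAL_DIR>/.fccana/stackpin. A minimal check of its content (the directory below is a hypothetical analysis location):

    import pathlib

    pin_path = pathlib.Path('/path/to/my-analysis/.fccana/stackpin')   # hypothetical LOCAL_DIR
    if pin_path.is_file():
        print('Analysis pinned to:', pin_path.read_text().strip())
    else:
        print('Analysis not pinned.')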
rootfile. Must have position information!", type = str) @@ -31,10 +31,14 @@ def str2bool(v): parser.add_argument("-storeGenBranches", default = True, help="Whether or not to store gen information", type = str2bool) parser.add_argument("-genBranchName", default = "genParticles", help="Name of the gen particle branch in the input rootfile", type = str) parser.add_argument("-storeSimParticleSecondaries", default = False, help="Whether to store the SimParticleSecondaries information", type = str2bool) -parser.add_argument("-simParticleSecondariesNames", default = ["SimParticleSecondaries"], help = "name of the SimParticleSecondaries branch", type = str) -parser.add_argument("-useGeometry", default = False, help="Whether or not to load the FCCSW geometry. Used to get the detector segmentation for e.g. the definition of the cell layer index.", type = str2bool) -parser.add_argument("-geometryFile", default = '/afs/cern.ch/user/b/brfranco/work/public/Fellow/FCCSW/dummy_releases/Mark_Test2/FCCDetectors/Detector/DetFCCeeIDEA-LAr/compact/FCCee_DectMaster.xml', help = "Path to the xml geometry file", type = str) +parser.add_argument("-simParticleSecondariesNames", default = ["SimParticleSecondaries"], help = "name of the SimParticleSecondaries branch", type = str, nargs = '+') +parser.add_argument("-useGeometry", default = True, help="Whether or not to load the FCCSW geometry. Used to get the detector segmentation for e.g. the definition of the cell layer index.", type = str2bool) +parser.add_argument("-geometryFile", default = '/afs/cern.ch/user/b/brfranco/work/public/Fellow/FCCSW/test_recipe_April2022/FCCDetectors/Detector/DetFCCeeIDEA-LAr/compact/FCCee_DectMaster.xml', help = "Path to the xml geometry file", type = str) parser.add_argument("-readoutName", default = 'ECalBarrelPhiEta', help = "Name of the readout to use for the layer/phi/theta bin definition", type = str) +parser.add_argument("-extractHighestEnergyClusterCells", default = False, help = "Use it if you need cells attached to the higest energy cluster, will use the first cluster collection in clusterBranchNames", type = str2bool) +parser.add_argument("-isPi0", default = 0, help = "Weaver training needs a branch in the input tree with the target label: set it to 1 when running on pi0 files, 0 for photon files", type = int) +parser.add_argument("-doWeaverInference", default = False, help = "Apply weaver inference on highest energy cluster cell variables, extractHighestEnergyClusterCells must be set to True", type = str2bool) +parser.add_argument("-weaverFiles", default = [os.path.join("/afs/cern.ch/user/b/brfranco/work/public/Fellow/FCCSW/221123/LAr_scripts/machineLearning/weaver_models_theta_phi/", fileName) for fileName in ["fccee_pi_vs_gamma_simpler_best_epoch_state.onnx", "preprocess.json"]], help = "Path to the '.onnx' (first argument) and '.json' (second argument) coming out of your training", type = str, nargs = '+') args = parser.parse_args() @@ -66,7 +70,6 @@ def run(self): dict_outputBranchName_function["%s_phi"%cellBranchName] = "CaloNtupleizer::getCaloHit_phi(%s)"%cellBranchName dict_outputBranchName_function["%s_theta"%cellBranchName] = "CaloNtupleizer::getCaloHit_theta(%s)"%cellBranchName dict_outputBranchName_function["%s_eta"%cellBranchName] = "CaloNtupleizer::getCaloHit_eta(%s)"%cellBranchName - #dict_outputBranchName_function["%s_position"%cellBranchName] = "CaloNtupleizer::getCaloHit_positionVector3(%s)"%cellBranchName dict_outputBranchName_function["%s_energy"%cellBranchName] = 
"CaloNtupleizer::getCaloHit_energy(%s)"%cellBranchName if args.useGeometry: dict_outputBranchName_function["%s_phiBin"%cellBranchName] = "CaloNtupleizer::getCaloHit_phiBin(%s)"%cellBranchName @@ -83,7 +86,6 @@ def run(self): dict_outputBranchName_function["%s_theta"%clusterBranchName] = "CaloNtupleizer::getCaloCluster_theta(%s)"%clusterBranchName dict_outputBranchName_function["%s_eta"%clusterBranchName] = "CaloNtupleizer::getCaloCluster_eta(%s)"%clusterBranchName dict_outputBranchName_function["%s_energy"%clusterBranchName] = "CaloNtupleizer::getCaloCluster_energy(%s)"%clusterBranchName - #dict_outputBranchName_function["%s_position"%clusterBranchName] = "CaloNtupleizer::getCaloCluster_positionVector3(%s)"%clusterBranchName dict_outputBranchName_function["%s_firstCell"%clusterBranchName] = "CaloNtupleizer::getCaloCluster_firstCell(%s)"%clusterBranchName dict_outputBranchName_function["%s_lastCell"%clusterBranchName] = "CaloNtupleizer::getCaloCluster_lastCell(%s)"%clusterBranchName @@ -96,7 +98,6 @@ def run(self): dict_outputBranchName_function["%s_phi"%clusterCellsBranchName] = "CaloNtupleizer::getCaloHit_phi(%s)"%clusterCellsBranchName dict_outputBranchName_function["%s_theta"%clusterCellsBranchName] = "CaloNtupleizer::getCaloHit_theta(%s)"%clusterCellsBranchName dict_outputBranchName_function["%s_eta"%clusterCellsBranchName] = "CaloNtupleizer::getCaloHit_eta(%s)"%clusterCellsBranchName - #dict_outputBranchName_function["%s_position"%clusterCellsBranchName] = "CaloNtupleizer::getCaloHit_positionVector3(%s)"%clusterCellsBranchName dict_outputBranchName_function["%s_energy"%clusterCellsBranchName] = "CaloNtupleizer::getCaloHit_energy(%s)"%clusterCellsBranchName if args.useGeometry: dict_outputBranchName_function["%s_phiBin"%clusterCellsBranchName] = "CaloNtupleizer::getCaloHit_phiBin(%s)"%clusterCellsBranchName @@ -124,14 +125,69 @@ def run(self): dict_outputBranchName_function["genParticle_pid"] = "FCCAnalyses::MCParticle::get_pdg(%s)"%args.genBranchName dict_outputBranchName_function["genParticle_status"] = "FCCAnalyses::MCParticle::get_genStatus(%s)"%args.genBranchName + if args.extractHighestEnergyClusterCells: + clusterBranchName = args.clusterBranchNames[0] + dict_outputBranchName_function["highestEnergyCluster_index"] = "std::distance({0}.energy.begin(), std::max_element({0}.energy.begin(), {0}.energy.end()))".format(clusterBranchName) + dict_outputBranchName_function["highestEnergyCluster_isPhoton"] = "%d"%(not args.isPi0) + dict_outputBranchName_function["highestEnergyCluster_isPi0"] = "%d"%args.isPi0 + dict_outputBranchName_function["highestEnergyCluster_energy"] = "%s_energy[highestEnergyCluster_index]"%clusterBranchName + dict_outputBranchName_function["highestEnergyCluster_phi"] = "%s_phi[highestEnergyCluster_index]"%clusterBranchName + dict_outputBranchName_function["highestEnergyCluster_theta"] = "%s_theta[highestEnergyCluster_index]"%clusterBranchName + dict_outputBranchName_function["highestEnergyCluster_firstCell_index"] = "%s[highestEnergyCluster_index].hits_begin"%clusterBranchName + dict_outputBranchName_function["highestEnergyCluster_lastCell_index"] = "%s[highestEnergyCluster_index].hits_end"%clusterBranchName + ROOT.gInterpreter.Declare(""" + template + ROOT::VecOps::RVec myRange(ROOT::VecOps::RVec& vec, std::size_t begin, std::size_t end) + { + ROOT::VecOps::RVec ret; + ret.reserve(end - begin); + for (auto i = begin; i < end; ++i) + ret.push_back(vec[i]); + return ret; + } + """) + + dict_outputBranchName_function["highestEnergyCluster_cells_transient"] = 
"myRange(PositionedCaloClusterCells, highestEnergyCluster_firstCell_index, highestEnergyCluster_lastCell_index)" + dict_outputBranchName_function["highestEnergyCluster_cells_energy"] = "CaloNtupleizer::getCaloHit_energy(highestEnergyCluster_cells_transient)" + dict_outputBranchName_function["highestEnergyCluster_cells_relative_phi"] = "CaloNtupleizer::getCaloHit_phi(highestEnergyCluster_cells_transient) - highestEnergyCluster_phi" + dict_outputBranchName_function["highestEnergyCluster_cells_relative_theta"] = "CaloNtupleizer::getCaloHit_theta(highestEnergyCluster_cells_transient) - highestEnergyCluster_theta" + dict_outputBranchName_function["highestEnergyCluster_cells_layer"] = "CaloNtupleizer::getCaloHit_layer(highestEnergyCluster_cells_transient)" + dict_outputBranchName_function["highestEnergyCluster_cells_n"] = "highestEnergyCluster_cells_transient.size()" + dict_outputBranchName_function["highestEnergyCluster_cells_x"] = "myRange(PositionedCaloClusterCells.position.x, highestEnergyCluster_firstCell_index, highestEnergyCluster_lastCell_index)" + dict_outputBranchName_function["highestEnergyCluster_cells_y"] = "myRange(PositionedCaloClusterCells.position.y, highestEnergyCluster_firstCell_index, highestEnergyCluster_lastCell_index)" + dict_outputBranchName_function["highestEnergyCluster_cells_radius"] = "sqrt(pow(highestEnergyCluster_cells_x,2)+pow(highestEnergyCluster_cells_y,2))" + df2 = self.df branchList = ROOT.vector('string')() - for branchName in dict_outputBranchName_function: - branchList.push_back(branchName) - print(branchName, dict_outputBranchName_function[branchName]) df2 = df2.Define(branchName, dict_outputBranchName_function[branchName]) + if not "transient" in branchName: + branchList.push_back(branchName) + print(branchName, dict_outputBranchName_function[branchName]) + + + if args.doWeaverInference:# placed here because Utils::as_vector can not be stored in the output rootfile but we need e.g. 
highestEnergyCluster_cells_energy to be defined + if not args.extractHighestEnergyClusterCells: + print("You must set extractHighestEnergyClusterCells to True to use WeaverInference") + sys.exit(1) + from ROOT import WeaverUtils + weaver = WeaverUtils.setup_weaver(args.weaverFiles[0], args.weaverFiles[1], ('highestEnergyCluster_cells_energy', 'relative_highestEnergyCluster_cells_phi', 'highestEnergyCluster_cells_relative_theta', 'highestEnergyCluster_cells_layer')) + df2 = (df2.Define("cells_e", "Utils::as_vector(highestEnergyCluster_cells_energy)") + .Define("cells_theta", "Utils::as_vector(highestEnergyCluster_cells_relative_theta)") + .Define("cells_phi", "Utils::as_vector(highestEnergyCluster_cells_relative_phi)") + .Define("cells_layer", "Utils::as_vector(highestEnergyCluster_cells_layer)") + .Define("MVAVec", "WeaverUtils::get_weights(cells_e, cells_phi, cells_theta, cells_layer)") + .Define("highestEnergyCluster_isPhoton_inferred", "WeaverUtils::get_weight(MVAVec, 0)") + .Define("highestEnergyCluster_isPi0_inferred", "WeaverUtils::get_weight(MVAVec, 1)")) + + branchList.push_back("highestEnergyCluster_isPhoton_inferred") + branchList.push_back("highestEnergyCluster_isPi0_inferred") + print("highestEnergyCluster_isPhoton_inferred WeaverUtils::get_weight(MVAVec, 0)") + print("highestEnergyCluster_isPi0_inferred WeaverUtils::get_weight(MVAVec, 1)") + + + df2.Snapshot("events", self.outname, branchList) diff --git a/examples/FCCee/higgs/mH-recoil/mumu/analysis_final.py b/examples/FCCee/higgs/mH-recoil/mumu/analysis_final.py index 2739fb3f6c..e9c3b1e3f4 100644 --- a/examples/FCCee/higgs/mH-recoil/mumu/analysis_final.py +++ b/examples/FCCee/higgs/mH-recoil/mumu/analysis_final.py @@ -42,4 +42,7 @@ "leptonic_recoil_m_zoom4":{"name":"Zcand_recoil_m","title":"Z leptonic recoil [GeV]","bin":800,"xmin":120,"xmax":140}, "leptonic_recoil_m_zoom5":{"name":"Zcand_recoil_m","title":"Z leptonic recoil [GeV]","bin":2000,"xmin":120,"xmax":140}, "leptonic_recoil_m_zoom6":{"name":"Zcand_recoil_m","title":"Z leptonic recoil [GeV]","bin":100,"xmin":130.3,"xmax":132.5}, + "mz_1D":{"cols":["Zcand_m"],"title":"m_{Z} [GeV]", "bins": [(40,80,100)]}, # 1D histogram (alternative syntax) + "mz_recoil_2D":{"cols":["Zcand_m", "Zcand_recoil_m"],"title":"m_{Z} - leptonic recoil [GeV]", "bins": [(40,80,100), (100,120,140)]}, # 2D histogram + "mz_recoil_3D":{"cols":["Zcand_m", "Zcand_recoil_m", "Zcand_recoil_m"],"title":"m_{Z} - leptonic recoil - leptonic recoil [GeV]", "bins": [(40,80,100), (100,120,140), (100,120,140)]}, # 3D histogram } diff --git a/examples/FCCee/higgs/mH-recoil/mumu/analysis_plots.py b/examples/FCCee/higgs/mH-recoil/mumu/analysis_plots.py index 581b8592a8..0f6aca0212 100644 --- a/examples/FCCee/higgs/mH-recoil/mumu/analysis_plots.py +++ b/examples/FCCee/higgs/mH-recoil/mumu/analysis_plots.py @@ -11,8 +11,10 @@ yaxis = ['lin','log'] stacksig = ['stack','nostack'] outdir = 'outputs/FCCee/higgs/mH-recoil/mumu/plots/' +plotStatUnc = True variables = ['mz','mz_zoom','leptonic_recoil_m','leptonic_recoil_m_zoom','leptonic_recoil_m_zoom2'] +rebin = [1, 1, 1, 1, 2] # uniform rebin per variable (optional) ###Dictonnary with the analysis name as a key, and the list of selections to be plotted for this analysis. 
The name of the selections should be the same than in the final selection selections = {} diff --git a/examples/FCCee/smearing/smear_jets.py b/examples/FCCee/smearing/smear_jets.py new file mode 100644 index 0000000000..557e7cfd24 --- /dev/null +++ b/examples/FCCee/smearing/smear_jets.py @@ -0,0 +1,231 @@ +import os +import urllib.request +import ROOT +from copy import deepcopy + +""" +This example runs the jet clustering sequence with Durham N=2 exclusive algorithm and produces jet scores for the various +flavour with variations of the impact parameter resolution and neutral hadrons energy resolutions. + +To run this example: + +fccanalysis run examples/FCCee/smearing/smear_jets.py \ +--files-list /eos/experiment/fcc/ee/generation/DelphesEvents/winter2023/IDEA/wzp6_ee_nunuH_Hss_ecm240/events_196755633.root \ +--nevents 100 \ + +""" + +# ____________________________________________________________ +def get_file_path(url, filename): + if os.path.exists(filename): + return os.path.abspath(filename) + else: + urllib.request.urlretrieve(url, os.path.basename(url)) + return os.path.basename(url) + + +# ____________________________________________________________ +def jet_sequence(df, collections, output_branches, tag=""): + + ## define jet clustering parameters + njets = 2 + + jetClusteringHelper = ExclusiveJetClusteringHelper(collections["PFParticles"], njets, tag) + ## run jet clustering + + df = jetClusteringHelper.define(df) + + output_branches += jetClusteringHelper.outputBranches() + + jets_p4 = "tlv_jets" + mjj = "mjj" + + if tag != "": + jets_p4 = "tlv_jets_{}".format(tag) + mjj = "mjj_{}".format(tag) + + df = df.Define(jets_p4, "JetConstituentsUtils::compute_tlv_jets({})".format(jetClusteringHelper.jets)) + df = df.Define(mjj, "JetConstituentsUtils::InvariantMass({}[0], {}[1])".format(jets_p4, jets_p4)) + output_branches.append(mjj) + + ## define jet flavour tagging parameters + + jetFlavourHelper = JetFlavourHelper( + collections, + jetClusteringHelper.jets, + jetClusteringHelper.constituents, + tag, + ) + + ## define observables for tagger + df = jetFlavourHelper.define(df) + + ## tagger inference + df = jetFlavourHelper.inference(weaver_preproc, weaver_model, df) + + output_branches += jetFlavourHelper.outputBranches() + + return df + + +# ____________________________________________________________ + +## input file needed for unit test in CI +testFile = "https://fccsw.web.cern.ch/fccsw/testsamples/wzp6_ee_nunuH_Hss_ecm240.root" + +## latest particle transformer model, trainied on 9M jets in winter2023 samples +model_name = "fccee_flavtagging_edm4hep_wc_v1" + +## model files needed for unit testing in CI +url_model_dir = "https://fccsw.web.cern.ch/fccsw/testsamples/jet_flavour_tagging/winter2023/wc_pt_13_01_2022/" +url_preproc = "{}/{}.json".format(url_model_dir, model_name) +url_model = "{}/{}.onnx".format(url_model_dir, model_name) + +## model files locally stored on /eos +model_dir = "/eos/experiment/fcc/ee/jet_flavour_tagging/winter2023/wc_pt_13_01_2022/" +local_preproc = "{}/{}.json".format(model_dir, model_name) +local_model = "{}/{}.onnx".format(model_dir, model_name) + +## get local file, else download from url +weaver_preproc = get_file_path(url_preproc, local_preproc) +weaver_model = get_file_path(url_model, local_model) + +from addons.ONNXRuntime.python.jetFlavourHelper import JetFlavourHelper +from addons.FastJet.python.jetClusteringHelper import ExclusiveJetClusteringHelper + +output_branches = [] + +# Mandatory: RDFanalysis class where the use defines the operations on 
the TTree +class RDFanalysis: + # __________________________________________________________ + # Mandatory: analysers funtion to define the analysers to process, please make sure you return the last dataframe, in this example it is df2 + def analysers(df): + + ## name of collections in EDM root files + collections = { + "GenParticles": "Particle", + "MCRecoMap": "MCRecoAssociations", + "PFParticles": "ReconstructedParticles", + "PFTracks": "EFlowTrack", + "PFPhotons": "EFlowPhoton", + "PFNeutralHadrons": "EFlowNeutralHadron", + "TrackState": "EFlowTrack_1", + "TrackerHits": "TrackerHits", + "CalorimeterHits": "CalorimeterHits", + "dNdx": "EFlowTrack_2", + "PathLength": "EFlowTrack_L", + "Bz": "magFieldBz", + } + + ## run full sequence with nominal detector + df = jet_sequence(df, collections, output_branches) + + ## define MC/Reco links, needed for smearing + df = ( + df.Alias("mc_reco_0", "{}#0.index".format(collections["MCRecoMap"])).Alias( + "mc_reco_1", "{}#1.index".format(collections["MCRecoMap"]) + ) + # matching between the RecoParticles and the MCParticles: + .Define( + "reco_mc_index", + "ReconstructedParticle2MC::getRP2MC_index(mc_reco_0,mc_reco_1,{})".format(collections["PFParticles"]), + ) + ) + + ## produce smeared collections + + ### run same sequences but with smeared collection + scale_factors = [1.0, 2.0, 5.0, 10.0] + + + for sf in scale_factors: + + ## 1. do Impact parameter smearing first + + collections_ip = deepcopy(collections) + ip_tag = "ip{}".format(sf).replace(".", "p") + collections_ip["TrackState"] = "TrackState_{}".format(ip_tag) + + # Generate a new set of tracks, re-scaling the covariance matrix + # order of the scaling factors : smear_d0, smear_phi, smear_omega, smear_z0, smear_tlambda + # the boolean flag is for debugging + # here smear only d0, z0 + df = df.Define( + collections_ip["TrackState"], + ROOT.SmearObjects.SmearedTracks(sf, 1.0, 1.0, sf, 1.0, False), + [collections["PFParticles"], collections["TrackState"], "reco_mc_index", collections["GenParticles"]], + ) + + ## run full sequence with covariance smeared detector + df = jet_sequence(df, collections_ip, output_branches, ip_tag) + + + ## 2. do Neutral Hadron energy smearing + + collections_res = deepcopy(collections) + res_tag = "res{}".format(sf).replace(".", "p") + collections_res["PFParticles"] = "ReconstructedParticles_{}".format(res_tag) + + df = df.Define( + collections_res["PFParticles"], + # type: 11 (electrons), 13 (muons), 130 (neutral hadrons), 22 (photon), 0 (charged hadrons), -1 (all) + # mode: 0 energy, 1 momentum + # parameters (scale, type, mode, debug) + # here re-smear only neutral hadrons (130) in energy mode + ROOT.SmearObjects.SmearedReconstructedParticle(sf, 130, 0, False), + [collections["PFParticles"], "reco_mc_index", collections["GenParticles"]], + ) + + ## run full sequence with energy nh smeared + df = jet_sequence(df, collections_res, output_branches, res_tag) + + ## 3. do dNdx smearing + + collections_dndx = deepcopy(collections) + dndx_tag = "dndx{}".format(sf).replace(".", "p") + collections_dndx["dNdx"] = "dNdx_{}".format(dndx_tag) + + df = df.Define( + collections_dndx["dNdx"], + ROOT.SmearObjects.SmearedTracksdNdx(sf, False), + [ + collections["PFParticles"], + collections["dNdx"], + collections["PathLength"], + "reco_mc_index", + collections["GenParticles"], + ], + ) + + ## run full sequence with energy nh smeared + df = jet_sequence(df, collections_dndx, output_branches, dndx_tag) + + + ## 4. 
do tof smearing + collections_tof = deepcopy(collections) + tof_tag = "tof{}".format(sf).replace(".", "p") + collections_tof["TrackerHits"] = "tof_{}".format(tof_tag) + + df = df.Define( + collections_tof["TrackerHits"], + ROOT.SmearObjects.SmearedTracksTOF(sf, False), + [ + collections["PFParticles"], + collections["PFTracks"], + collections["TrackerHits"], + collections["PathLength"], + "reco_mc_index", + collections["GenParticles"], + ], + ) + + ## run full sequence with TOF smearing + df = jet_sequence(df, collections_tof, output_branches, tof_tag) + + return df + + # __________________________________________________________ + # Mandatory: output function, please make sure you return the branchlist as a python list + + def output(): + return output_branches diff --git a/examples/FCCee/smearing/smear_tracks.py b/examples/FCCee/smearing/smear_tracks.py new file mode 100644 index 0000000000..a85ca2f2b5 --- /dev/null +++ b/examples/FCCee/smearing/smear_tracks.py @@ -0,0 +1,51 @@ +# Optional test file +testFile = "/eos/experiment/fcc/ee/generation/DelphesEvents/winter2023/IDEA/p8_ee_Zbb_ecm91_EvtGen_Bs2DsK/events_017659734.root" + +import ROOT + + +# Mandatory: RDFanalysis class where the user defines the operations on the TTree +class RDFanalysis: + + # __________________________________________________________ + # Mandatory: analysers function to define the analysers to process, please make sure you return the last dataframe, in this example it is df2 + def analysers(df): + df2 = ( + df.Alias("Particle1", "Particle#1.index") + .Alias("MCRecoAssociations0", "MCRecoAssociations#0.index") + .Alias("MCRecoAssociations1", "MCRecoAssociations#1.index") + # matching between the RecoParticles and the MCParticles: + .Define( + "RP_MC_index", + "ReconstructedParticle2MC::getRP2MC_index(MCRecoAssociations0,MCRecoAssociations1,ReconstructedParticles)", + ) + # Generate a new set of tracks, re-scaling the covariance matrix + # order of the scaling factors : smear_d0, smear_phi, smear_omega, smear_z0, smear_tlambda + # the boolean flag is for debugging + .Define( + "SmearedTracks", + ROOT.SmearObjects.SmearedTracks(2.0, 2.0, 2.0, 2.0, 2.0, True), + ["ReconstructedParticles", "EFlowTrack_1", "RP_MC_index", "Particle"], + ) + # What follows is only needed for validation.
+ # Validation is made over the 1st track in each event + .Define( + "mcTrackParameters", + "SmearObjects::mcTrackParameters( ReconstructedParticles, EFlowTrack_1, RP_MC_index, Particle)", + ) + .Define("atrack_omega", "return EFlowTrack_1[0].omega ;") + .Define("atrack_omega_cov", "return EFlowTrack_1[0].covMatrix[5] ;") + .Define("smearTrack_omega", "return SmearedTracks[0].omega; ") + .Define("mcTrack_omega", "return mcTrackParameters[0].omega; ") + # events->Draw("(smearTrack_omega-mcTrack_omega)/TMath::Sqrt(atrack_omega_cov)" : should be gaussian with sigma = 2 + # events->Draw("(atrack_omega-mcTrack_omega)/TMath::Sqrt(atrack_omega_cov)") : should be gaussian with sigma = 1 + ) + return df2 + + # __________________________________________________________ + # Mandatory: output function, please make sure you return the branchlist as a python list + def output(): + branchList = ["atrack_omega", "atrack_omega_cov", "smearTrack_omega", "mcTrack_omega"] + branchList += ["SmearedTracks", "EFlowTrack_1", "ReconstructedParticles"] + + return branchList diff --git a/examples/FCCee/test/weaver_inference.py b/examples/FCCee/test/weaver_inference.py deleted file mode 100644 index d4e7b0aba1..0000000000 --- a/examples/FCCee/test/weaver_inference.py +++ /dev/null @@ -1,62 +0,0 @@ - -#Mandatory: List of processes -processList = { - #'p8_ee_ZH_ecm240':{'fraction':0.2, 'chunks':2, 'output':'p8_ee_ZH_ecm240_out'} - 'p8_noBES_ee_H_Hbb_ecm125':{'fraction':0.01, 'chunks':1, 'output':'test_out'} -} - -#Mandatory: Production tag when running over EDM4Hep centrally produced events, this points to the yaml files for getting sample statistics -prodTag = "FCCee/spring2021/IDEA/" - -#Optional: output directory, default is local running directory -outputDir = "." - -#Optional -nCPUS = 8 -runBatch = False -#batchQueue = "longlunch" -#compGroup = "group_u_FCC.local_gen" - -#Optional test file -testFile ="https://fccsw.web.cern.ch/fccsw/testsamples/p8_ee_ZH_ecm240_events_101027117.root" - -#Mandatory: RDFanalysis class where the use defines the operations on the TTree -class RDFanalysis(): - #__________________________________________________________ - #Mandatory: analysers funtion to define the analysers to process, please make sure you return the last dataframe, in this example it is df2 - def analysers(df): - from ROOT import JetFlavourUtils - from os import getenv - test_inputs_path = getenv('TEST_INPUT_DATA_DIR', '/afs/cern.ch/work/s/selvaggi/public/4Laurent/ONNX') - weaver = JetFlavourUtils.setup_weaver(test_inputs_path + '/fccee_flavtagging_dummy.onnx', - test_inputs_path + '/preprocess.json', - ('pfcand_e', 'pfcand_theta', 'pfcand_phi', 'pfcand_pid', 'pfcand_charge')) - - df2 = (df - # retrieve all information about jet constituents for each jet in collection - .Define("JetsConstituents", "JetConstituentsUtils::build_constituents(Jet, ReconstructedParticles)") - .Define("JC_e", "JetConstituentsUtils::get_e(JetsConstituents)") - .Define("JC_theta", "JetConstituentsUtils::get_theta(JetsConstituents)") - .Define("JC_phi", "JetConstituentsUtils::get_phi(JetsConstituents)") - .Define("JC_pid", "JetConstituentsUtils::get_type(JetsConstituents)") - .Define("JC_charge", "JetConstituentsUtils::get_charge(JetsConstituents)") - - # run inference - .Define("MVAVec", "JetFlavourUtils::get_weights(JC_e, JC_theta, JC_phi, JC_pid, JC_charge)") - - # recast output - .Define("Jet_isG", "JetFlavourUtils::get_weight(MVAVec, 0)") - .Define("Jet_isQ", "JetFlavourUtils::get_weight(MVAVec, 1)") - .Define("Jet_isS", 
"JetFlavourUtils::get_weight(MVAVec, 2)") - .Define("Jet_isC", "JetFlavourUtils::get_weight(MVAVec, 3)") - .Define("Jet_isB", "JetFlavourUtils::get_weight(MVAVec, 4)") - ) - return df2 - - #__________________________________________________________ - #Mandatory: output function, please make sure you return the branchlist as a python list - def output(): - branchList = [ - 'Jet_isG', 'Jet_isQ', 'Jet_isS', 'Jet_isC', 'Jet_isB', - ] - return branchList diff --git a/examples/FCCee/tutorials/vertexing/analysis_Bs2DsK_MCseeded.py b/examples/FCCee/tutorials/vertexing/analysis_Bs2DsK_MCseeded.py new file mode 100644 index 0000000000..075dedce5c --- /dev/null +++ b/examples/FCCee/tutorials/vertexing/analysis_Bs2DsK_MCseeded.py @@ -0,0 +1,355 @@ +#Optional test file +testFile="/eos/experiment/fcc/ee/generation/DelphesEvents/winter2023/IDEA/p8_ee_Zbb_ecm91_EvtGen_Bs2DsK/events_017659734.root" + + + +########################################################################################################## + +# +# Code dedicated to the Bs to Ds K analysis, not in central FCCAnalysis library +# + +########################################################################################################## + + +ReconstructedDs_code=''' + +// +// -- This method reconstructs the Ds pseudo-track using Franco's VertexMore. +// -- It returns a vector by convenience - but the vector only contains at most one TrackState. +// -- tracks in input = the DsTracks +// + +#include "FCCAnalyses/VertexFitterSimple.h" +#include "FCCAnalyses/VertexingUtils.h" + + +ROOT::VecOps::RVec ReconstructedDs( ROOT::VecOps::RVec tracks, + bool AddMassConstraints = false) { + + + ROOT::VecOps::RVec result; + + int Ntr = tracks.size(); + if ( Ntr != 3 ) return result; + + TVectorD** trkPar = new TVectorD*[Ntr]; + TMatrixDSym** trkCov = new TMatrixDSym*[Ntr]; + + bool Units_mm = true; + + for (Int_t i = 0; i < Ntr; i++) { + edm4hep::TrackState t = tracks[i] ; + TVectorD par = FCCAnalyses::VertexingUtils::get_trackParam( t, Units_mm ) ; + trkPar[i] = new TVectorD( par ); + TMatrixDSym Cov = FCCAnalyses::VertexingUtils::get_trackCov( t, Units_mm ); + trkCov[i] = new TMatrixDSym ( Cov ); + } + + VertexFit theVertexFit( Ntr, trkPar, trkCov ); + TVectorD x = theVertexFit.GetVtx() ; // this actually runs the fit + + VertexFit* vertexfit = &theVertexFit; + VertexMore vertexmore( vertexfit, Units_mm ); + + if ( AddMassConstraints ) { + + const double kaon_mass = 4.9367700e-01 ; + const double pion_mass = 0.140; + const double Ds_mass = 1.9683000e+00 ; + const double Phi_mass = 1.0194610e+00 ; + + double Ds_masses[3] = { kaon_mass, kaon_mass, pion_mass }; + int Ds_list[3] = { 0, 1, 2 }; + vertexmore.AddMassConstraint(Ds_mass, 3, Ds_masses, Ds_list); // Ds mass constraint + + /* + // Phi mass constraint: does not crash, but pulls of the resulting Bs vertex are bad. 
+ double Phi_masses[2] = { kaon_mass, kaon_mass }; + int Phi_list[2] = { 0, 1 }; + vertexmore.AddMassConstraint(Phi_mass, 2, Phi_masses, Phi_list); // Phi mass constraint + */ + + vertexmore.MassConstrFit(); + } + + TVectorD Ds_track_param = vertexmore.GetVpar(); + TMatrixDSym cov = vertexmore.GetVcov(); + + TVectorD Ds_track_param_edm4hep = FCCAnalyses::VertexingUtils::Delphes2Edm4hep_TrackParam( Ds_track_param, Units_mm ); + edm4hep::TrackState track; + track.D0 = Ds_track_param_edm4hep[0] ; + track.phi = Ds_track_param_edm4hep[1]; + track.omega = Ds_track_param_edm4hep[2]; + track.Z0 = Ds_track_param_edm4hep[3] ; + track.tanLambda = Ds_track_param_edm4hep[4] ; + + // now the covariance matrix - lower-triangle : + + TMatrixDSym covM(5); + std::array covMatrix = FCCAnalyses::VertexingUtils::Delphes2Edm4hep_TrackCovMatrix( cov, Units_mm ) ; + + track.covMatrix = covMatrix ; + + result.push_back( track ); + + return result; +} +''' + + +########################################################################################################## + +Momentum_ReconstructedDs_code=''' + +#include "FCCAnalyses/VertexFitterSimple.h" +#include "FCCAnalyses/VertexingUtils.h" + +TVector3 Momentum_ReconstructedDs( edm4hep::TrackState Ds_pseudoTrack ) { + + TVectorD Param = FCCAnalyses::VertexingUtils::get_trackParam( Ds_pseudoTrack ); // track parameters, Franco's convention + TVector3 result = FCCAnalyses::VertexingUtils::ParToP( Param ); + + return result; + +} + +''' + + +########################################################################################################## + + +Tracks_for_the_Bs_vertex_code=''' +#include "FCCAnalyses/VertexingUtils.h" +#include "FCCAnalyses/ReconstructedParticle.h" + +ROOT::VecOps::RVec tracks_for_fitting_the_Bs_vertex( + ROOT::VecOps::RVec ReconstructedDs, + ROOT::VecOps::RVec BachelorKTrack) { + + ROOT::VecOps::RVec result; + if ( ReconstructedDs.size() != 1 ) return result; + if ( BachelorKTrack.size() != 1 ) return result; + + result.push_back( ReconstructedDs[0]) ; // the pseudo-Ds track + result.push_back( BachelorKTrack[0] ); // the bachelor K + + return result; +} +''' + + +########################################################################################################## + + +import ROOT +ROOT.gInterpreter.Declare(ReconstructedDs_code) +ROOT.gInterpreter.Declare(Momentum_ReconstructedDs_code) +ROOT.gInterpreter.Declare(Tracks_for_the_Bs_vertex_code) + + + +#Mandatory: RDFanalysis class where the use defines the operations on the TTree +class RDFanalysis(): + + #__________________________________________________________ + #Mandatory: analysers funtion to define the analysers to process, please make sure you return the last dataframe, in this example it is df2 + def analysers(df): + df2 = ( + df + + .Alias("Particle1", "Particle#1.index") + .Alias("MCRecoAssociations0", "MCRecoAssociations#0.index") + .Alias("MCRecoAssociations1", "MCRecoAssociations#1.index") + + # --------------------------------------------------------------------------------------- + # + # ----- Retrieve the indices of the MC Particles of interest + + # MC indices of the decay Bs_bar (PDG = -531) -> Ds+ (PDG = 431) K- (PDG = -321) + # Retrieves a vector of int's which correspond to indices in the Particle block + # vector[0] = the mother, and then the daughters in the order specified, i.e. here + # [1] = the Ds+, [2] = the K- + # Boolean arguments : + # 1st: stableDaughters. 
when set to true, the daughters specified in the list are looked + # for among the final, stable particles that come out from the mother, i.e. the decay tree is + # explored recursively if needed. + # 2nd: chargeConjugateMother + # 3rd: chargeConjugateDaughters + # 4th: inclusiveDecay: when set to false, if a mother is found, that decays + # into the particles specified in the list plus other particle(s), this decay is not selected. + # If the event contains more than one such decays,only the first one is kept. + .Define("Bs2DsK_indices", "MCParticle::get_indices( -531, {431, -321}, false, true, true, false) ( Particle, Particle1)" ) + + # select events for which the requested decay chain has been found: + .Filter("Bs2DsK_indices.size() > 0") + + .Define("Bs_MCindex", "return Bs2DsK_indices[0] ;" ) + .Define("Ds_MCindex", "return Bs2DsK_indices[1] ;" ) + .Define("BachelorK_MCindex", "return Bs2DsK_indices[2] ;" ) + + + # MC indices of (this) Ds+ -> K+ K- Pi+ + # Boolean arguments : + # 1st: stableDaughters. when set to true, the daughters specified in the list are looked + # for among the final, stable particles that come out from the mother, i.e. the decay tree is + # explored recursively if needed. + # 2nd: chargeConjugateDaughters + # 3rd: inclusiveDecay + .Define("Ds2KKPi_indices", "MCParticle::get_indices_MotherByIndex( Ds_MCindex, { 321, -321, 211 }, true, true, false, Particle, Particle1 )") + + .Define("Kplus_MCindex", "return Ds2KKPi_indices[1] ; ") + .Define("Kminus_MCindex", "return Ds2KKPi_indices[2] ; ") + .Define("Piplus_MCindex", "return Ds2KKPi_indices[3] ; ") + + # --------------------------------------------------------------------------------------- + + + # --------------------------------------------------------------------------------------- + # ----- The MC Particles : + + # the MC Bs : + .Define("Bs", "return Particle[ Bs_MCindex ]; ") + # the MC Ds : + .Define("Ds", "return Particle[ Ds_MCindex ]; ") + # the MC bachelor K- from the Bs decay : + .Define("BachelorK", "return Particle[ BachelorK_MCindex ]; " ) + # The MC legs from the Ds decay + .Define("Kplus", "return Particle[ Kplus_MCindex ] ; ") + .Define("Kminus", "return Particle[ Kminus_MCindex ] ; ") + .Define("Piplus", "return Particle[ Piplus_MCindex ] ; ") + + # some MC-truth kinematic quantities: + .Define("BachelorK_px", "return BachelorK.momentum.x ;") + .Define("BachelorK_py", "return BachelorK.momentum.y ;") + .Define("BachelorK_pz", "return BachelorK.momentum.z ;") + + .Define("Ds_px", "return Ds.momentum.x ;") + .Define("Ds_py", "return Ds.momentum.y ;") + .Define("Ds_pz", "return Ds.momentum.z ;") + .Define("Ds_pt", "ROOT::VecOps::RVec v; v.push_back( Ds ); return MCParticle::get_pt(v ) ;") + + + # --------------------------------------------------------------------------------------- + + + # --------------------------------------------------------------------------------------- + # ----- The MC-truth decay vertices of the Bs and of the Ds + + # Note: in case the Bs or Bsbar has oscillated before it decays, the vertex returned + # below is the decay vertex of the Bs after oscillation. 
+ + # MC Decay vertex of the Bs = the production vertex of the Bachelor K + .Define("BsMCDecayVertex", "return BachelorK.vertex; " ) + # MC Decay vertex of the Ds = the production vertex of the Kplus + .Define("DsMCDecayVertex", "return Kplus.vertex ; " ) + + # --------------------------------------------------------------------------------------- + + + # --------------------------------------------------------------------------------------- + # ----- The RecoParticles that are MC-matched with the particles of the Ds decay + + # RecoParticles associated with the Ds decay + # Note: the size of DsRecoParticles below is always 3 provided that Ds2KKPi_indices is not empty. + # possibly including "dummy" particles in case one of the legs did not make a RecoParticle + # (e.g. because it is outside the tracker acceptance). + # This is done on purpose, in order to maintain the mapping with the indices - i.e. the 1st particle in + # the list BsRecoParticles is the Kminus, then the Kplus, then the Piplus. + # (selRP_matched_to_list ignores the unstable MC particles that are in the input list of indices + # hence the mother particle, which is the [0] element of the Ds2KKPi_indices vector). + # + # The matching between RecoParticles and MCParticles requires 4 collections. For more + # detail, see https://github.com/HEP-FCC/FCCAnalyses/tree/master/examples/basics + + .Define("DsRecoParticles", " ReconstructedParticle2MC::selRP_matched_to_list( Ds2KKPi_indices, MCRecoAssociations0,MCRecoAssociations1,ReconstructedParticles,Particle)") + + # the corresponding tracks - here, dummy particles, if any, are removed + .Define("DsTracks", "ReconstructedParticle2Track::getRP2TRK( DsRecoParticles, EFlowTrack_1)" ) + + # number of tracks used to reconstruct the Ds vertex + .Define("n_DsTracks", "ReconstructedParticle2Track::getTK_n( DsTracks )") + + # --------------------------------------------------------------------------------------- + # ------ Reco'ed vertex of the Ds + + # The index "3" below is just to indicate that this is a "tertiary" vertex, it has no influence + # on the vertex fitting. + # Note: no mass constraint is applied here. See the code of "ReconstructedDs" for an example + # of how mass constraints can be applied. + .Define("DsVertexObject", "VertexFitterSimple::VertexFitter_Tk( 3, DsTracks)" ) + .Define("DsVertex", "VertexingUtils::get_VertexData( DsVertexObject )") + + # ------------------------------------------------------------------------------------------------------- + + + # ------------------------------------------------------------------------------------------------------- + # ---------- Reconstruction of the Bs vertex + + # The Ds pseudoTrack (TrackState) - returned as a vector of TrackState, with one single element : + # boolean = true: add Ds mass constraint in the Ds vertex fit. + # Default = false (no mass constraint applied) + .Define("v_Ds_PseudoTrack", "ReconstructedDs( DsTracks, true )") + + # Number of Ds pseudoTracks. In principle it should always be equal to one at this stage. 
+ .Define("n_pdt", "ReconstructedParticle2Track::getTK_n( v_Ds_PseudoTrack )") + # but it is explicitely required to be non zero, otherwise the code that comes next would crash: + .Filter("n_pdt > 0") + + # The momentum vector (TVector3) of the Ds : + .Define("Ds_momentum", "Momentum_ReconstructedDs( v_Ds_PseudoTrack[0] ) ") + .Define("RecoDs_px", "return Ds_momentum.x() ") + .Define("RecoDs_py", "return Ds_momentum.y() ") + .Define("RecoDs_pz", "return Ds_momentum.z() ") + + + # the RecoParticle associated with the bachelor K + .Define("BsRecoParticles", "ReconstructedParticle2MC::selRP_matched_to_list( Bs2DsK_indices, MCRecoAssociations0,MCRecoAssociations1,ReconstructedParticles,Particle)") + .Define("RecoBachelorK", " return BsRecoParticles[0] ; ") # only the bachelor K is stable, among the indices of Bs2DsK_indices + + .Define("RecoBachelorK_px", "return RecoBachelorK.momentum.x; ") + .Define("RecoBachelorK_py", "return RecoBachelorK.momentum.y; ") + .Define("RecoBachelorK_pz", "return RecoBachelorK.momentum.z; ") + + + # and the corresponding track + .Define("v_RecoBachelorK", "ROOT::VecOps::RVec v; v.push_back( RecoBachelorK ); return v; ") + .Define("v_BachelorKTrack", "ReconstructedParticle2Track::getRP2TRK( v_RecoBachelorK, EFlowTrack_1)" ) + + # Now we have the two tracks that we need for the Bs vertex : + .Define("BsTracks", "tracks_for_fitting_the_Bs_vertex( v_Ds_PseudoTrack, v_BachelorKTrack) ") + + + # --------------------------------------------------------------------------------------- + # ------ Reco'ed vertex of the Bs + + .Define("BsVertexObject", "VertexFitterSimple::VertexFitter_Tk( 2, BsTracks )" ) + .Define("n_BsTracks", "ReconstructedParticle2Track::getTK_n( BsTracks )") + + # This is the final Bs vertex + .Define("BsVertex", "VertexingUtils::get_VertexData( BsVertexObject )") + + + ) + return df2 + + + #__________________________________________________________ + #Mandatory: output function, please make sure you return the branchlist as a python list + def output(): + branchList = [ + "DsMCDecayVertex", + "BsMCDecayVertex", + "n_DsTracks", + "DsVertex", + "Ds_momentum", + "n_BsTracks", + "BsVertex", + "BachelorK_px", "BachelorK_py", "BachelorK_pz", + "RecoBachelorK_px","RecoBachelorK_py","RecoBachelorK_pz", + "Ds_px", "Ds_py", "Ds_pz","Ds_pt", + "RecoDs_px", "RecoDs_py", "RecoDs_pz", + + ] + return branchList diff --git a/examples/FCCee/tutorials/vertexing/analysis_primary_vertex.py b/examples/FCCee/tutorials/vertexing/analysis_primary_vertex.py index 3472492e2d..e95fcd58d3 100644 --- a/examples/FCCee/tutorials/vertexing/analysis_primary_vertex.py +++ b/examples/FCCee/tutorials/vertexing/analysis_primary_vertex.py @@ -40,7 +40,7 @@ def analysers(df): # This is not a good estimate of the primary vertex: even in a Z -> uds event, there are displaced tracks (e.g. Ks, Lambdas), which would bias the fit. # Below, we determine the "primary tracks" using an iterative algorithm - cf LCFI+. 
- .Define("RecoedPrimaryTracks", "VertexFitterSimple::get_PrimaryTracks( VertexObject_allTracks, EFlowTrack_1, true, 4.5, 20e-3, 300, 0., 0., 0., 0)") + .Define("RecoedPrimaryTracks", "VertexFitterSimple::get_PrimaryTracks( EFlowTrack_1, true, 4.5, 20e-3, 300, 0., 0., 0.)") # Now we run again the vertex fit, but only on the primary tracks : .Define("PrimaryVertexObject", "VertexFitterSimple::VertexFitter_Tk ( 1, RecoedPrimaryTracks, true, 4.5, 20e-3, 300) ") diff --git a/examples/FCCee/tutorials/vertexing/plots_Bs2DsK.x b/examples/FCCee/tutorials/vertexing/plots_Bs2DsK.x new file mode 100644 index 0000000000..23ec54c390 --- /dev/null +++ b/examples/FCCee/tutorials/vertexing/plots_Bs2DsK.x @@ -0,0 +1,55 @@ +{ + +// ------------------------------------------------------------------ + +// Ds vertex fit + +TString cut = "n_DsTracks==3"; + +// normalised chi2 of the fit : (Ndf = 2 x Ntracks - 3 ) +events->Draw("DsVertex.chi2", cut); + +// resolutions in x : +events->Draw("(DsVertex.position.x-DsMCDecayVertex.x)",cut) ; + +// pulls of the fitted vertex position : +events->Draw("(DsVertex.position.x-DsMCDecayVertex.x)/TMath::Sqrt( DsVertex.covMatrix[0] )",cut); +events->Draw("(DsVertex.position.y-DsMCDecayVertex.y)/TMath::Sqrt( DsVertex.covMatrix[2] )",cut) ; +events->Draw("(DsVertex.position.z-DsMCDecayVertex.z)/TMath::Sqrt( DsVertex.covMatrix[5] )",cut) ; + + +// ------------------------------------------------------------------ + + +// Bs vertex fit + +cut = "n_DsTracks==3 && n_BsTracks ==2" ; + + +// normalised chi2 : +events->Draw("BsVertex.chi2", cut); + +// resolutions: +events->Draw("(BsVertex.position.x-BsMCDecayVertex.x)",cut) ; + +// pulls in x : +events->Draw("(BsVertex.position.x-BsMCDecayVertex.x)/TMath::Sqrt( BsVertex.covMatrix[0] )",cut); + +// pulls of the flight distance : + +TString fld_mm = "TMath::Sqrt( pow( BsVertex.position.x, 2) + pow( BsVertex.position.y,2) + pow( BsVertex.position.z,2))"; +TString fld_gen_mm = "TMath::Sqrt( pow( BsMCDecayVertex.x[0], 2) + pow( BsMCDecayVertex.y[0],2) + pow( BsMCDecayVertex.z[0],2) )"; +TString fld_res_mm = fld_mm + " - " + fld_gen_mm; +TString term1 = " BsVertex.position.x * ( BsVertex.covMatrix[0] * BsVertex.position.x + BsVertex.covMatrix[1] * BsVertex.position.y + BsVertex.covMatrix[3] * BsVertex.position.z ) " ; +TString term2 = " BsVertex.position.y * ( BsVertex.covMatrix[1] * BsVertex.position.x + BsVertex.covMatrix[2] * BsVertex.position.y + BsVertex.covMatrix[4] * BsVertex.position.z ) " ; +TString term3 = " BsVertex.position.z * ( BsVertex.covMatrix[3] * BsVertex.position.x + BsVertex.covMatrix[4] * BsVertex.position.y + BsVertex.covMatrix[5] * BsVertex.position.z ) "; +TString tsum = term1 + " + " + term2 + " + " + term3; +TString fld_unc = " ( TMath::Sqrt( " + tsum + ") / " + fld_mm +" ) "; +TString fld_pull = "( " + fld_res_mm + " ) / " + fld_unc; +events->Draw(fld_pull , cut); + + + + +} + diff --git a/examples/FCCee/vertex/analysis.py b/examples/FCCee/vertex/analysis.py index 0efa96d9ae..1a3ebfcfce 100644 --- a/examples/FCCee/vertex/analysis.py +++ b/examples/FCCee/vertex/analysis.py @@ -84,9 +84,10 @@ def run(self): # --- now, determime the primary (and secondary) tracks without using the MC-matching: # First, reconstruct a vertex from all tracks - .Define("VertexObject_allTracks", "VertexFitterSimple::VertexFitter_Tk ( 1, EFlowTrack_1, true, 4.5, 20e-3, 300)") + #.Define("VertexObject_allTracks", "VertexFitterSimple::VertexFitter_Tk ( 1, EFlowTrack_1, true, 4.5, 20e-3, 300)") # Select the tracks that are reconstructed as 
primaries - .Define("RecoedPrimaryTracks", "VertexFitterSimple::get_PrimaryTracks( VertexObject_allTracks, EFlowTrack_1, true, 4.5, 20e-3, 300, 0., 0., 0., 0)") + .Define("RecoedPrimaryTracks", "VertexFitterSimple::get_PrimaryTracks( EFlowTrack_1, true, 4.5, 20e-3, 300, 0., 0., 0.)") + .Define("n_RecoedPrimaryTracks", "ReconstructedParticle2Track::getTK_n( RecoedPrimaryTracks )") # the final primary vertex : .Define("FinalVertexObject", "VertexFitterSimple::VertexFitter_Tk ( 1, RecoedPrimaryTracks, true, 4.5, 20e-3, 300) ") diff --git a/examples/FCCee/vertex_lcfiplus/analysis_SV.py b/examples/FCCee/vertex_lcfiplus/analysis_SV.py new file mode 100644 index 0000000000..cf7a7b7930 --- /dev/null +++ b/examples/FCCee/vertex_lcfiplus/analysis_SV.py @@ -0,0 +1,134 @@ +testFile="root://eospublic.cern.ch//eos/experiment/fcc/ee/generation/DelphesEvents/winter2023/IDEA/p8_ee_Zbb_ecm91/events_066726720.root" + +#Mandatory: List of processes +processList = { + 'p8_ee_Zuds_ecm91':{'fraction':0.0001}, #Run 0.01% statistics in one output file named /p8_ee_Zuds_ecm91.root + #'p8_ee_Zcc_ecm91':{'fraction':0.0001}, #Run 0.01% statistics in one output file named /p8_ee_Zcc_ecm91.root + #'p8_ee_Zbb_ecm91':{'fraction':0.0001, 'output':'p8_ee_Zbb_ecm91_SV_100K'}, +} + +#Mandatory: Production tag when running over EDM4Hep centrally produced events, this points to the yaml files for getting sample statistics +prodTag = "FCCee/spring2021/IDEA/" + +#Optional: output directory, default is local running directory +outputDir = "outputs/FCCee/KG/" + +#Optional: ncpus, default is 4 +nCPUS = 1 + +#Optional running on HTCondor, default is False +#runBatch = True + +#Optional batch queue name when running on HTCondor, default is workday +#batchQueue = "microcentury" + +#Optional computing account when running on HTCondor, default is group_u_FCC.local_gen +#compGroup = "group_u_FCC.local_gen" + +#Mandatory: RDFanalysis class where the use defines the operations on the TTree +class RDFanalysis(): + + #__________________________________________________________ + #Mandatory: analysers funtion to define the analysers to process, please make sure you return the last dataframe, in this example it is df2 + def analysers(df): + df2 = (df + # number of tracks + .Define("ntracks","ReconstructedParticle2Track::getTK_n(EFlowTrack_1)") + + + ##### + # determime the primary (and secondary) tracks without using the MC-matching: + + # Select the tracks that are reconstructed as primaries + .Define("RecoedPrimaryTracks", "VertexFitterSimple::get_PrimaryTracks( EFlowTrack_1, true, 4.5, 20e-3, 300, 0., 0., 0.)") + + .Define("n_RecoedPrimaryTracks", "ReconstructedParticle2Track::getTK_n( RecoedPrimaryTracks )") + # the final primary vertex : + .Define("PrimaryVertexObject", "VertexFitterSimple::VertexFitter_Tk ( 1, RecoedPrimaryTracks, true, 4.5, 20e-3, 300) ") + .Define("PrimaryVertex", "VertexingUtils::get_VertexData( PrimaryVertexObject )") + + # the secondary tracks + .Define("SecondaryTracks", "VertexFitterSimple::get_NonPrimaryTracks( EFlowTrack_1, RecoedPrimaryTracks )") + .Define("n_SecondaryTracks", "ReconstructedParticle2Track::getTK_n( SecondaryTracks )" ) + + # which of the tracks are primary according to the reco algprithm (boolean) + .Define("IsPrimary_based_on_reco", "VertexFitterSimple::IsPrimary_forTracks( EFlowTrack_1, RecoedPrimaryTracks )") + + # jet clustering (ee-kt) before reconstructing SVs in event + .Define("RP_px", "ReconstructedParticle::get_px(ReconstructedParticles)") + .Define("RP_py", 
"ReconstructedParticle::get_py(ReconstructedParticles)") + .Define("RP_pz", "ReconstructedParticle::get_pz(ReconstructedParticles)") + .Define("RP_e", "ReconstructedParticle::get_e(ReconstructedParticles)") + #build psedo-jets with the Reconstructed final particles + .Define("pseudo_jets", "JetClusteringUtils::set_pseudoJets(RP_px, RP_py, RP_pz, RP_e)") + #run jet clustering with all reco particles. ee_kt_algorithm, exclusive clustering, exactly 2 jets, E-scheme + .Define("FCCAnalysesJets_ee_kt", "JetClustering::clustering_ee_kt(2, 2, 1, 0)(pseudo_jets)") + #get the jets out of the structure + .Define("jets_ee_kt", "JetClusteringUtils::get_pseudoJets(FCCAnalysesJets_ee_kt)") + #get the jet constituents out of the structure + .Define("jetconstituents_ee_kt", "JetClusteringUtils::get_constituents(FCCAnalysesJets_ee_kt)") + + + # finding SVs in jets + .Define("SV_jet", "VertexFinderLCFIPlus::get_SV_jets(ReconstructedParticles, EFlowTrack_1, PrimaryVertexObject, IsPrimary_based_on_reco, jets_ee_kt, jetconstituents_ee_kt)") + # finding SVs in the event (two interfaces) + #.Define("SV_evt1", "VertexFinderLCFIPlus::get_SV_event(ReconstructedParticles, EFlowTrack_1, PrimaryVertexObject, IsPrimary_based_on_reco)") + #.Define("SV_evt2", "VertexFinderLCFIPlus::get_SV_event(SecondaryTracks, EFlowTrack_1, PrimaryVertexObject)") + + # multiplicity + #.Define("SV_evt2_n","VertexingUtils::get_n_SV(SV_evt2)") + #.Define("SV_evt1_n","VertexingUtils::get_n_SV(SV_evt1)") + .Define("SV_jet_n", "VertexingUtils::get_n_SV(SV_jet)") + # vertex position + .Define("SV_jet_position", "VertexingUtils::get_position_SV( SV_jet )") + #.Define("SV_evt1_position", "VertexingUtils::get_position_SV( SV_evt1 )") + #.Define("SV_evt2_position", "VertexingUtils::get_position_SV( SV_evt2 )") + + # more SV properties + .Define("sv_mass", "VertexingUtils::get_invM(SV_jet)") # SV mass + # .Define("sv_p", "VertexingUtils::get_pMag_SV(SV_jet)") # SV momentum (magnitude) + # .Define("sv_ntracks", "VertexingUtils::get_VertexNtrk(SV_jet)") # SV daughters (no of tracks) + # .Define("sv_chi2", "VertexingUtils::get_chi2_SV(SV_jet)") # SV chi2 (not normalised) + # .Define("sv_normchi2","VertexingUtils::get_norm_chi2_SV(SV_jet)") # SV chi2 (normalised) + # .Define("sv_ndf", "VertexingUtils::get_nDOF_SV(SV_jet)") # SV no of DOF + # .Define("sv_theta", "VertexingUtils::get_theta_SV(SV_jet)") # SV polar angle (theta) + # .Define("sv_phi", "VertexingUtils::get_phi_SV(SV_jet)") # SV azimuthal angle (phi) + + ) + return df2 + + #__________________________________________________________ + #Mandatory: output function, please make sure you return the branchlist as a python list + def output(): + branchList = [ + # primary vertex and primary tracks w/o any MC-matching : + 'IsPrimary_based_on_reco', + 'PrimaryVertex', + + # vector of SV vertex objects + 'SV_jet', + #'SV_evt1', + #'SV_evt2', + + # SV multiplicity + #'SV_evt1_n', + #'SV_evt2_n', + 'SV_jet_n', + + # SV position + 'SV_jet_position', + #'SV_evt1_position', + #'SV_evt2_position', + + # more SV variables + 'sv_mass', + # 'sv_p', + # 'sv_ntracks', + # 'sv_chi2', + # 'sv_normchi2', + # 'sv_ndf', + # 'sv_theta', + # 'sv_phi', + + ] + return branchList diff --git a/examples/FCCee/vertex_lcfiplus/analysis_V0.py b/examples/FCCee/vertex_lcfiplus/analysis_V0.py new file mode 100644 index 0000000000..d6bcc35505 --- /dev/null +++ b/examples/FCCee/vertex_lcfiplus/analysis_V0.py @@ -0,0 +1,148 @@ 
+testFile="root://eospublic.cern.ch//eos/experiment/fcc/ee/generation/DelphesEvents/winter2023/IDEA/p8_ee_Zbb_ecm91/events_066726720.root" + +#Mandatory: List of processes +processList = { + 'p8_ee_Zuds_ecm91':{'fraction':0.0001}, #Run 0.01% statistics in one output file named /p8_ee_Zuds_ecm91.root +} + +#Mandatory: Production tag when running over EDM4Hep centrally produced events, this points to the yaml files for getting sample statistics +prodTag = "FCCee/spring2021/IDEA/" + +#Optional: output directory, default is local running directory +outputDir = "outputs/FCCee/KG" + +#Optional: ncpus, default is 4 +nCPUS = 8 + +#Optional running on HTCondor, default is False +#runBatch = False + +#Optional batch queue name when running on HTCondor, default is workday +#batchQueue = "longlunch" + +#Optional computing account when running on HTCondor, default is group_u_FCC.local_gen +#compGroup = "group_u_FCC.local_gen" + +#Mandatory: RDFanalysis class where the use defines the operations on the TTree +class RDFanalysis(): + + #__________________________________________________________ + #Mandatory: analysers funtion to define the analysers to process, please make sure you return the last dataframe, in this example it is df2 + def analysers(df): + df2 = ( + df + .Alias("Particle1", "Particle#1.index") + + # get all the MC particles to check for Ks + .Define("MC_pdg", "FCCAnalyses::MCParticle::get_pdg(Particle)") + # get momenta & mass of all particles + .Define("MC_p4", "FCCAnalyses::MCParticle::get_tlv(Particle)") + .Define("MC_mass", "FCCAnalyses::MCParticle::get_mass(Particle)") + + # Ks -> pi+pi- + .Define("K0spipi_indices", "FCCAnalyses::MCParticle::get_indices_ExclusiveDecay(310, {211, -211}, true, true) (Particle, Particle1)") + # Lambda0 -> p+pi- + .Define("Lambda0ppi_indices", "FCCAnalyses::MCParticle::get_indices_ExclusiveDecay(3122, {2212, -211}, true, true) (Particle, Particle1)") + + # determime the primary (and secondary) tracks without using the MC-matching: + + # Select the tracks that are reconstructed as primaries + .Define("RecoedPrimaryTracks", "VertexFitterSimple::get_PrimaryTracks( EFlowTrack_1, true, 4.5, 20e-3, 300, 0., 0., 0.)") + + .Define("n_RecoedPrimaryTracks", "ReconstructedParticle2Track::getTK_n( RecoedPrimaryTracks )") + # the final primary vertex : + .Define("PrimaryVertexObject", "VertexFitterSimple::VertexFitter_Tk ( 1, RecoedPrimaryTracks, true, 4.5, 20e-3, 300) ") + .Define("PrimaryVertex", "VertexingUtils::get_VertexData( PrimaryVertexObject )") + + # the secondary tracks + .Define("SecondaryTracks", "VertexFitterSimple::get_NonPrimaryTracks( EFlowTrack_1, RecoedPrimaryTracks )") + .Define("n_SecondaryTracks", "ReconstructedParticle2Track::getTK_n( SecondaryTracks )" ) + + # which of the tracks are primary according to the reco algprithm + .Define("IsPrimary_based_on_reco", "VertexFitterSimple::IsPrimary_forTracks( EFlowTrack_1, RecoedPrimaryTracks )") + + # jet clustering + .Define("RP_px", "ReconstructedParticle::get_px(ReconstructedParticles)") + .Define("RP_py", "ReconstructedParticle::get_py(ReconstructedParticles)") + .Define("RP_pz", "ReconstructedParticle::get_pz(ReconstructedParticles)") + .Define("RP_m", "ReconstructedParticle::get_mass(ReconstructedParticles)") + # build psedo-jets with the Reconstructed final particles + .Define("pseudo_jets", "JetClusteringUtils::set_pseudoJets_xyzm(RP_px, RP_py, RP_pz, RP_m)") + # run jet clustering with all reco particles. 
ee_kt_algorithm, exclusive clustering, exactly 2 jets, E-scheme + .Define("FCCAnalysesJets_ee_kt", "JetClustering::clustering_ee_kt(2, 2, 1, 0)(pseudo_jets)") + # get the jets out of the structure + .Define("jets_ee_kt", "JetClusteringUtils::get_pseudoJets(FCCAnalysesJets_ee_kt)") + # get the jet constituents out of the structure + .Define("jetconstituents", "JetClusteringUtils::get_constituents(FCCAnalysesJets_ee_kt)") + + + # find V0s + #.Define("V0_evt", "VertexFinderLCFIPlus::get_V0s(SecondaryTracks, PrimaryVertexObject, true)") + .Define("V0", "VertexFinderLCFIPlus::get_V0s_jet(ReconstructedParticles, EFlowTrack_1, IsPrimary_based_on_reco, jets_ee_kt, jetconstituents, PrimaryVertexObject)") + .Define("V0_jet", "VertexingUtils::get_svInJets(V0.vtx, V0.nSV_jet)") + # get pdg vector out + #.Define("V0_pdg", "VertexingUtils::get_pdg_V0(V0)") + .Define("V0_pdg", "VertexingUtils::get_pdg_V0(V0.pdgAbs, V0.nSV_jet)") + # get invariant mass vector out + .Define("V0_invM", "VertexingUtils::get_invM_V0(V0.invM, V0.nSV_jet)") + # get the position + .Define("V0_pos", "VertexingUtils::get_position_SV(V0_jet)") + # get the chi2 + #.Define("V0_chi2", "VertexingUtils::get_chi2_SV(V0)") + .Define("V0_chi2", "VertexingUtils::get_chi2_SV(V0_jet)") + # get the momenta + .Define("V0_p", "VertexingUtils::get_p_SV(V0_jet)") + + # more V0 properties + # .Define("v0_pid", "VertexingUtils::get_pdg_V0(V0.pdgAbs, V0.nSV_jet)") # V0 pdg id + # .Define("v0_mass", "VertexingUtils::get_invM_V0(V0.invM, V0.nSV_jet)") # V0 invariant mass + # .Define("v0_p", "VertexingUtils::get_pMag_SV(V0_jet)") # V0 momentum (magnitude) + # .Define("v0_ntracks", "VertexingUtils::get_VertexNtrk(V0_jet)") # V0 daughters (no of tracks) + # .Define("v0_chi2", "VertexingUtils::get_chi2_SV(V0_jet)") # V0 chi2 (not normalised) + # .Define("v0_normchi2","VertexingUtils::get_norm_chi2_SV(V0_jet)") # V0 chi2 (normalised but same as above) + # .Define("v0_ndf", "VertexingUtils::get_nDOF_SV(V0_jet)") # V0 no of DOF (always 1) + # .Define("v0_theta", "VertexingUtils::get_theta_SV(V0_jet)") # V0 polar angle (theta) + # .Define("v0_phi", "VertexingUtils::get_phi_SV(V0_jet)") # V0 azimuthal angle (phi) + + ) + return df2 + + #__________________________________________________________ + #Mandatory: output function, please make sure you return the branchlist as a python list + def output(): + branchList = [ + # MC particles + "MC_pdg", + "MC_p4", + "MC_mass", + + # Ks -> pi+pi- & Lambda0->p+pi- + "K0spipi_indices", + "Lambda0ppi_indices", + + # primary vertex and primary tracks w/o any MC-matching : + "IsPrimary_based_on_reco", + "PrimaryVertex", + + # V0 object + "V0", + "V0_jet", + "V0_pdg", + "V0_invM", + "V0_pos", + "V0_chi2", + "V0_p", + + # more V0 properties + # 'v0_pid', + # 'v0_mass', + # 'v0_p', + # 'v0_ntracks', + # 'v0_chi2', + # 'v0_normchi2', + # 'v0_ndf', + # 'v0_theta', + # 'v0_phi', + + ] + return branchList diff --git a/examples/FCCee/vertex_perf/analysis.py b/examples/FCCee/vertex_perf/analysis.py index 265012e76e..152a3ab227 100644 --- a/examples/FCCee/vertex_perf/analysis.py +++ b/examples/FCCee/vertex_perf/analysis.py @@ -4,8 +4,6 @@ print ("Load cxx analyzers ... 
",) ROOT.gSystem.Load("libedm4hep") ROOT.gSystem.Load("libpodio") -ROOT.gSystem.Load("libawkward") -ROOT.gSystem.Load("libawkward-cpu-kernels") ROOT.gSystem.Load("libFCCAnalyses") ROOT.gErrorIgnoreLevel = ROOT.kFatal diff --git a/examples/FCCee/weaver/README.md b/examples/FCCee/weaver/README.md index 268e2fa803..583387c19c 100644 --- a/examples/FCCee/weaver/README.md +++ b/examples/FCCee/weaver/README.md @@ -1,32 +1,37 @@ # Quick tour -## stage 1: produce event based tree +## stage 1: produce event based tree (Whizard or Pythia8) ``` -fccanalysis run stage1.py --output stage1_cc.root --files-list /eos/experiment/fcc/ee/generation/DelphesEvents/pre_fall2022_training/IDEA/p8_ee_ZH_Znunu_Hcc_ecm240/events_663529174.root +### test produce input files for training (WHIZARD) +fccanalysis run examples/FCCee/weaver/stage1.py --output test_Hss.root --files-list /eos/experiment/fcc/ee/generation/DelphesEvents/winter2023/IDEA/wzp6_ee_nunuH_Hss_ecm240/events_196755633.root --ncpus 64 ``` ## stage 2: produce jet based tree ``` -g++ -o stage2 stage2.cpp `root-config --cflags --libs` -Wall -./stage2 stage1_cc.root stage2_cc.root 0 10000 +python examples/FCCee/weaver/stage2.py test_Hss.root out_Hss.root 0 100 ``` ## run all stages in one go: ``` -python all_stages.py --fsplit 0.9 +### whizard +python examples/FCCee/weaver/stage_all.py --indir /eos/experiment/fcc/ee/generation/DelphesEvents/winter2023_training/IDEA/ --outdir /tmp/selvaggi/data/pre_winter2023_tests_v2/ --fsplit 0.9 --sample wzp6_ee_nunuH --ncpus 64 --nev 100000 ``` -## run inference +## run validation plots ``` -fccanalysis run analysis_inference.py --output inference.root --files-list /eos/experiment/fcc/ee/generation/DelphesEvents/pre_fall2022_training/IDEA/p8_ee_ZH_Znunu_Hcc_ecm240/events_663529174.root +python examples/FCCee/weaver/stage_plots.py --indir /tmp/selvaggi/data/winter2023_training_test/selvaggi_2023Mar01/ --outdir /eos/user/s/selvaggi/www/test_tag2 ``` +## test inference +``` +fccanalysis run examples/FCCee/weaver/analysis_inference.py --output test_ss.root --files-list /eos/experiment/fcc/ee/generation/DelphesEvents/winter2023/IDEA/wzp6_ee_nunuH_Hss_ecm240/events_196755633.root --ncpus 64 +``` # Preparation of dataset ## Generated samples -The samples used are stored in the directory `/eos/experiment/fcc/ee/generation/DelphesEvents/pre_fall2022_training/IDEA/` . -The events were simulated using Delphes. -The processes considered are $e^+ e^- \to Z(\to \nu \nu) H(\to aa)$ with $j = u,d,b,c,s,g$. +The samples used are stored in the directory `/eos/experiment/fcc/ee/generation/DelphesEvents/winter2023_training/IDEA/` . +The events were simulated using Delphes. +The processes considered are $e^+ e^- \to Z(\to \nu \nu) H(\to jj)$ with $j = u,d,b,c,s,g$. For the processes $j = u,d,b,c,s$ samples of $\sim 10^6$ events were produced (i.e. $2 \times 10^6$ jets per sample), for $j = g$ $\sim 2 \times 10^6$ (i.e. $2 \times 10^6$ jets per sample). Beamspot of 20 um size on Y-axis and 600 um on Z-axis was set. Tre tree containing the events in the input .root file is called _events_ . @@ -36,26 +41,26 @@ Tre tree containing the events in the input .root file is called _events_ . During the training we want ParticleNet to learn to identify a jet from its properties. This means that each entry of the training dataset should contain the properties of one jet (and of its constituents) which are significant for the discrimination only; furthermore, these properties should be organized in a format accessible to ParticleNet: *arrays*. 
However, the samples generated -* have a per-event structure, +* have a per-event structure, * each event contains way more information than the needed one for the training, -* each event is saved in edm4hep format +* each event is saved in edm4hep format So, before performing the training three actions are required: 1. read the generated samples in edm4hep format(through fccanalysis), 2. extract/compute the features of interest (through fccanalysis), 3. produce the ntuples (one per class) containing the interesting features. -In our case, the first two actions are executed by `stage1.py` and the third by `stage2.cpp` . -Since we are interested in the final ntuple, these two codes are executed jointly by `all_stages.py`, optimizing the times through the usage of multiprocessing. +In our case, the first two actions are executed by `stage1.py` and the third by `stage2.py` . +Since we are interested in the final ntuple, these two codes are executed jointly by `stage_all.py`, optimizing the times through the usage of multiprocessing. So the production of the training dataset from the generated samples is performed in two steps, which in the folloing will be referred to as _stage1_ and _stage2_. Even though the joint action of the two steps, an intermediate file is produced by _stage1_, which will be saved in the ouptut directory, together with the final ntuples but with a recognizable name. -For what concerns time: +For what concerns time: * _stage1_ takes $\sim 3-4$ minutes per $10^6$ events (run on 8 cpus); * _stage2_ takes $\sim 5$ minutes per $10^6$ events (run on 1 cpu). For what concerns memory usage: -* intermediate files weight $\sim 4$ Gb per $10^6$ events; +* intermediate files weight $\sim 4$ Gb per $10^6$ events; * final files weight $\sim 4$ Gb per $10^6$ events; In our case the directory containing the intermediate and final files weights $\sim 50$ Gb . @@ -63,9 +68,9 @@ We notice that the intermediate files could be deleted after the production of t ### Stage1 : `stage1.py` All the namespaces used are defined and developed inside the folder `analyzers`. -In particular, in JetConstituentsUtils we developed functions to compute the constituents features and modified ReconstructedParticle2Track in order to return the value $-9$ for particles (neutral) nothaving a track (the value was chosen arbitrarily, could be changed). +In particular, in JetConstituentsUtils we developed functions to compute the constituents features and modified ReconstructedParticle2Track in order to return the value $-9$ for particles (neutral) not having a track (the value was chosen arbitrarily, could be changed). -As said, in this stage basically the initial edm4hep files are read and the interesting features are computed. Furthermore, in our version, the clustering is done explicitly. +As said, in this stage basically the initial edm4hep files are read and the interesting features are computed. Furthermore, in our version, the clustering is done explicitly. In the initial tree each entry corresponds to an event. @@ -79,7 +84,7 @@ Let's go through the code. .Define("RP_py", "ReconstructedParticle::get_py(ReconstructedParticles)") .Define("RP_pz", "ReconstructedParticle::get_pz(ReconstructedParticles)") .Define("RP_e", "ReconstructedParticle::get_e(ReconstructedParticles)") - + #build pseudo jets with the RP .Define("pseudo_jets", "JetClusteringUtils::set_pseudoJets(RP_px, RP_py, RP_pz, RP_e)") #run jet clustering with all reconstructed particles. 
ee_genkt_algorithm, R=1.5, inclusive clustering, E-scheme @@ -92,13 +97,13 @@ Let's go through the code. In the initial tree, all the particles measured in one event are saved in one entry of the branch _ReconstructedParticles_ in a `ROOT::VecOps::RVec`. The line `.Define("RP_px", "ReconstructedParticle::get_px(ReconstructedParticles)")` takes this branch and for all entries computes px of each particle; the output of this call is a branch called _RP_px_ containing an `RVec` per each event. -The jet clustering is performed using the 4-momenta of the reconstructed particles. This operation returns two outputs: +The jet clustering is performed using the 4-momenta of the reconstructed particles. This operation returns two outputs: - `jets_ee_genkt` : `RVec< fastjet::Pseudojet >` ; `Pseudojet` methods and attributes allow to access the overall jet properties (implemented in `JetClusteringUtils.cc`). - - `jetconstituents_ee_genkt` : `RVec< RVec < int > >` , which is a vector of vectors of integer labels which were assigned to the particles during the clustering by the function `JetClusteringUtils::set_pseudoJets` : + - `jetconstituents_ee_genkt` : `RVec< RVec < int > >` , which is a vector of vectors of integer labels which were assigned to the particles during the clustering by the function `JetClusteringUtils::set_pseudoJets` : ``` -std::vector JetClusteringUtils::set_pseudoJets(ROOT::VecOps::RVec px, - ROOT::VecOps::RVec py, - ROOT::VecOps::RVec pz, +std::vector JetClusteringUtils::set_pseudoJets(ROOT::VecOps::RVec px, + ROOT::VecOps::RVec py, + ROOT::VecOps::RVec pz, ROOT::VecOps::RVec e) { std::vector result; unsigned index = 0; @@ -109,7 +114,7 @@ std::vector JetClusteringUtils::set_pseudoJets(ROOT::VecOps: } return result; } -``` +``` and that are now used to associate the particles to the belonging jet. In fact, by calling ``` .Define("JetsConstituents", "JetConstituentsUtils::build_constituents_cluster(ReconstructedParticles, jetconstituents_ee_genkt)") #build jet constituents @@ -128,12 +133,12 @@ rv::RVec build_constituents_cluster(const rv::RVec > is the key data structure for treating jets constituents in an event and will be mantained also when computing the features of the constituents for this stage. @@ -143,7 +148,7 @@ rv::RVec get_erel_log_cluster(const rv::RVec& jcs) { rv::RVec out; for (size_t i = 0; i < jets.size(); ++i) { // external loop: first index (i) running over the jets in the event - auto& jet_csts = out.emplace_back(); //in the ext. loop we fill the external vector with vectors + auto& jet_csts = out.emplace_back(); //in the ext. loop we fill the external vector with vectors //(one per jet) float e_jet = jets.at(i).E(); auto csts = get_jet_constituents(jcs, i); @@ -161,7 +166,7 @@ rv::RVec get_erel_log_cluster(const rv::RVec ` and the features of the constituents of the jets are saved as `RVec < RVec < float > >` ; the return type is actually a pointer to these structures. We still need to rearrange the structure from a per-event tree of pointers to RVec to a per-jet tree of arrays (an ntuple). @@ -169,155 +174,17 @@ At the end of this stage we have a tree in which each entry is an event; the fea #### What if implicit clustering ? ... 
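Coming back to the explicit-clustering flow described above, the index bookkeeping can be summarised with a small plain-Python sketch; the names, the made-up values and the base-10 logarithm are illustrative only, the real code uses the C++ helpers quoted above.

```
import math

# The reconstructed particles of one event; the position in this list is the label
# that set_pseudoJets stores with each pseudo-jet.
particles = [
    {"e": 40.1},   # index 0
    {"e": 30.0},   # index 1
    {"e": 25.1},   # index 2
    {"e": 9.9},    # index 3
]
# Shown only to illustrate where the labels come from.
pseudo_jets = [dict(p, user_index=i) for i, p in enumerate(particles)]

# Pretend the clustering grouped the labels like this (RVec<RVec<int>> in the real code).
jetconstituents = [[0, 2], [1, 3]]

# build_constituents_cluster-like step: use the labels to collect each jet's constituents.
jets_constituents = [[particles[i] for i in idx] for idx in jetconstituents]

# A per-constituent feature, e.g. the log of the energy carried relative to the jet
# (here the jet energy is just the sum of its constituents, as in the E-scheme).
jets_e = [sum(c["e"] for c in csts) for csts in jets_constituents]
erel_log = [[math.log10(c["e"] / e_jet) for c in csts]
            for csts, e_jet in zip(jets_constituents, jets_e)]
print(erel_log)   # one inner vector per jet, mirroring RVec<RVec<float>>
```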
-### Stage_ntuple : `stage2.cpp` +### Stage_ntuple : `stage2.py` The main goal of this stage is to rearrange the tree obtained in _stage1_ to a per-jet format, but other tasks are accomplished: * setting the flags of the class which the jets belong to; * checking the number of events actually considered is the wanted one; * there is a $\sim 30\%$ cases in which the clustering returns more than 2 jets, and $\sim$ few per million cases in which less than 2 jets are returned; so in the first case just the two higher energy jets are considered, while in the second case no jet is considered; a count of this events is printed to stdout. - -`stage2.cpp` takes 4 arguments: `USAGE: ./stage [root_inFileName] [root_outFileName] N_i N_f` -1. [root_inFileName] : path to input file in the form `path_to_stage1file/stage1_infilename` , + +`stage2.py` takes 4 arguments: `USAGE: python stage2.py [root_inFileName] [root_outFileName] N_i N_f` +1. [root_inFileName] : path to input file in the form `path_to_stage1file/stage1_infilename` , 2. [root_outFileName] : path to output file in the form `path_to_outputdir/outfilename` , 3. N_i : index of the event from where start reading the tree , 4. N_f : index of the event ehrtr to stop reading the tree . -Our choices are implemented in the app `all_stages.py`, so will be explained in the next section. +Our choices are implemented in the app `stage_all.py`, so will be explained in the next section. Now, let's go through the code. - -###### Loop structure - -The general structure of the loop is: -``` -loop : events { - ... - loop : jets { - ... - loop : constituents { - ... - } - ntuple.Fill() - } -} -``` - -The position of ntuple.Fill() inside the loop structure determines the per-jet structure. -We modified this basic structure: -* we insert where to start looping by `N_i` and how many events to consider by `Nevents_Max = N_f - N_i`; -* we loop over the events from N_i to the end of the tree, but introduce an external counter `saved_events_counts` which grows only if the event has been saved; this is the reliable counter! In fact, if the event is "strange" we don't save any jet, we skip to the next event. The loop breaks when `saved_events_counts == Nevents_Max` if the loop has not reached the end of the input file. Let's study different cases: - - If `nentries - N_i < Nevents_Max` $\implies$ in the file there are less events than required; no error produced, but in stdout will be printed the actual number of saved events; - - If `nentries - N_i > Nevents_Max` $\implies$ in the file there are exactly $Nevents_{Max}$; - - If `nentries - N_i = Nevents_Max` but there are strange events I will have less saved events, no error, but I know from stdout. - -Are considered "strange" events those in which the clustering returns `njets < 2`; in that case we skip the loop. If `njets >= 2` the loop is performed on the first two jets only (the ones having more ENERGY, they're ordered in stage1; the successives not expected: leak in clustering). 
- -``` -int N_i = atoi(argv[3]); //where to start reading the tree -int N_f = atoi(argv[4]); -int Nevents_Max = N_f - N_i; //number of events to be saved -int saved_events_counts = 0; //counts the number of events actually saved -int nentries = ev->GetEntries(); //total number of events in the tree - -for(int i = N_i+1; i < nentries; ++i) { // Loop over the events - ev->GetEntry(i); - njet = nJets; - - if (njet < 2) { //exclude the events with less than two jets - continue ; - } - - for(int j=0; j < 2; ++j) { //Loop over the jets inside the i-th event - //we only take the first two jets (the ones having more ENERGY, they're ordered in stage1), - //the third not expected: leak in clustering - - ... - - nconst = (count_Const->at(j)); - - for(int k = 0; k < nconst; ++k){ //Loop over the constituents of the j-th jet in the i-th event - ... - } - ntuple->Fill(); - } - saved_events_counts += 1; //we count the num of events saved - if (saved_events_counts == Nevents_Max) { //interrupt the loop if Nevents_Max events have already been saved - break; - } -} -``` - - -###### Translating -As an example, we take one jet feature (stored as `RVec < float > *` in the per-event tree) and one constituent feature (`RVec < RVec < float > > *`) and follow them through the code. - -``` -//Setting variables for reading -int nJets; //number of jets in the event -ROOT::VecOps::RVec<int> *count_Const = 0; //number of constituents of each jet -ROOT::VecOps::RVec<float> *Jets_e=0; -ROOT::VecOps::RVec<ROOT::VecOps::RVec<float>> *JetsConstituents_e = 0; - -ev->SetBranchAddress("njet", &nJets); -ev->SetBranchAddress("nconst", &count_Const); -ev->SetBranchAddress("Jets_e", &Jets_e); -ev->SetBranchAddress("JetsConstituents_e", &JetsConstituents_e); - -//Setting variables for writing the ntuple -int njet = 0; -int nconst = 0; -double recojet_e; -float pfcand_e[1000] = {0.}; //here we initialize with a large size - -ntuple->Branch("nconst", &nconst, "nconst/I"); -ntuple->Branch("recojet_e", &recojet_e); -ntuple->Branch("pfcand_e", pfcand_e, "pfcand_e[nconst]/F"); - -loop : i over events - loop : j over jets - - recojet_e = (*Jets_e)[j]; //Pointer usage - nconst = (count_Const->at(j)); - - loop : k over constituents - pfcand_e[k] = (JetsConstituents_e->at(j))[k]; //k-th element of the j-th vector - //pointed by JetsConstituents_e -``` - -In the ntuple, an overall jet property will be just a `double`. -Since we don't know a priori how many constituents the jet has, we initialize a constituent feature as an array with a number of elements larger than the possible number of constituents, to be sure that we succeed in reading all of them from the input file (this array works as a temporary "home" before being sent to the output file); we initialize all the values to 0. When we create the branch of the ntuple (`ntuple->Branch("pfcand_e", pfcand_e, "pfcand_e[nconst]/F")`), we pass as the size of the entry the actual number of constituents `nconst`, which has already been read into another branch (during the loop), so as not to save all the padding zeros contained in the local array. - -###### Setting the flags -We read the input file name and take the character corresponding to the class (in this case the last letter before .root); we set the flag once at the beginning and never change it afterwards; since its address is attached to the ntuple branch, every time `ntuple.Fill()` is called the same value will be added to the branch.
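The author's C++ version of this flag-setting code is shown in the next block; as a compact illustration, here is a hedged Python sketch of the same idea (the file name is hypothetical and a plain dict stands in for the ntuple branches): the flavour character is taken from the input file name, the one-hot flags are fixed once, and the same values are then attached to every jet that is filled.

```python
# Hypothetical stage1 output file; the class label is the last letter before ".root".
infile_name = "stage1_ee_ZH_vvbb.root"
flavour = infile_name[len(infile_name) - 6]   # same indexing as the C++ code: position length-6

# One-hot flags, set once at the beginning and never changed afterwards.
flags = {f: 0.0 for f in ("q", "b", "c", "s", "g")}
if flavour in flags:
    flags[flavour] = 1.0

# Every jet that is "filled" gets exactly the same flag values.
for jet_index in range(2):
    print("jet", jet_index, "flavour:", flavour, "flags:", flags)
```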
-``` -std::string infileName(argv[1]); -char flavour = infileName[infileName.length()-6]; - -float is_q = 0.; -float is_b = 0.; -float is_c = 0.; -float is_s = 0.; -float is_g = 0.; - -if (flavour == 'q') {is_q = 1.;} -if (flavour == 'b') {is_b = 1.;} -if (flavour == 'c') {is_c = 1.;} -if (flavour == 's') {is_s = 1.;} -if (flavour == 'g') {is_g = 1.;} -if (flavour == 't') {is_t = 1.;} - -ntuple->Branch("pfcand_isMu", pfcand_isMu, "pfcand_isMu[nconst]/F"); -ntuple->Branch("pfcand_isEl", pfcand_isEl, "pfcand_isEl[nconst]/F"); -ntuple->Branch("pfcand_isChargedHad", pfcand_isChargedHad, "pfcand_isChargedHad[nconst]/F"); -ntuple->Branch("pfcand_isGamma", pfcand_isGamma, "pfcand_isGamma[nconst]/F"); -ntuple->Branch("pfcand_isNeutralHad", pfcand_isNeutralHad, "pfcand_isNeutralHad[nconst]/F"); - -loop : events { - ... - loop: jets { - ... - loop: constituents { - ... - } - ntuple.Fill() - } -} -``` - diff --git a/examples/FCCee/weaver/all_stages.py b/examples/FCCee/weaver/all_stages.py deleted file mode 100644 index cbcfd4b679..0000000000 --- a/examples/FCCee/weaver/all_stages.py +++ /dev/null @@ -1,145 +0,0 @@ -import sys -import os -import argparse -import multiprocessing as mp -import subprocess -from subprocess import Popen, PIPE -from datetime import date -import time - - -# ________________________________________________________________________________ -def main(): - parser = argparse.ArgumentParser() - - parser.add_argument( - "--indir", - help="path input directory", - default="/eos/experiment/fcc/ee/generation/DelphesEvents/pre_fall2022_training/IDEA/", - ) - parser.add_argument( - "--outdir", - help="path output directory", - default="/eos/experiment/fcc/ee/jet_flavour_tagging/pre_fall2022_training/IDEA/", - ) - parser.add_argument("--nev", help="number of events", type=int, default=1000000) - parser.add_argument("--fsplit", help="fraction train/test", type=float, default=0.9) - parser.add_argument("--dry", help="dry run ", action='store_true') - - args = parser.parse_args() - inDIR = args.indir - outDIR = args.outdir - - # create necessary subdirectories - actualDIR = subprocess.check_output(["bash", "-c", "pwd"], universal_newlines=True)[:-1] - username = subprocess.check_output(["bash", "-c", "echo $USER"], universal_newlines=True)[:-1] - today = date.today().strftime("%Y%b%d") - subdir = username + "_" + today - subprocess.call(["bash", "-c", "mkdir -p {}".format(subdir)], cwd=outDIR) - OUTDIR = outDIR + username + "_" + today + "/" - print(OUTDIR) - - # set total number of events - N = args.nev - frac_split = args.fsplit - N_split = int(frac_split * N) - - print("") - print("=================================================") - print("") - print(" INDIR = {}".format(inDIR)) - print(" OUTDIR = {}".format(OUTDIR)) - print(" NEVENTS = {}".format(N)) - print(" FRAC SPLIT = {}".format(frac_split)) - print("") - - # compile stage2 c++ code - cmd_compile = "g++ -o stage2 stage2.cpp `root-config --cflags --libs` -Wall" - subprocess.check_call(cmd_compile, shell=True, stdout=None, stderr=None) - - # create commands - #cmdbase_stage1 = "fccanalysis run stage1.py --nevents {}".format(N) - cmdbase_stage1 = "fccanalysis run stage1.py".format(N) - opt1_out = " --output {}stage1_ee_ZH_vvCLASS.root ".format(OUTDIR) - opt1_in = " --files-list {}p8_ee_ZH_Znunu_HCLASS_ecm240/*.root ".format(inDIR) - wait = " ; sleep 30;" - cmd_stage1 = cmdbase_stage1 + opt1_out + opt1_in - - cmdbase_stagentuple = "./stage2 DIRstage1_ee_ZH_vvCLASS.root DIRntuple_MOD_ee_ZH_vvCLASS.root " - cmd_stage2 = 
cmdbase_stagentuple.replace("DIR", OUTDIR) - - samples = ["bb", "cc", "ss", "gg", "qq"] - mods = ["train", "test"] - - # create files storing stdout and stderr - list_stdout = [open(OUTDIR + "{}_stdout.txt".format(sample), "w") for sample in samples] - list_stderr = [open(OUTDIR + "{}_stderr.txt".format(sample), "w") for sample in samples] - - ###=== RUN STAGE 1 - for i, sample in enumerate(samples): - - if sample == "qq": - cmd_stage1_f = ( - cmdbase_stage1 - + opt1_out.replace("CLASS", "qq") - + " --files-list {}p8_ee_ZH_Znunu_Huu_ecm240/*.root {}p8_ee_ZH_Znunu_Hdd_ecm240/*.root".format( - inDIR, inDIR - ) - ) - else: - cmd_stage1_f = cmd_stage1.replace("CLASS", sample) - - print(cmd_stage1_f) - # run stage1 - start1_time = time.time() - if not args.dry: - subprocess.check_call( - cmd_stage1_f, shell=True, stdout=list_stdout[i], stderr=list_stderr[i] - ) - end1_time = time.time() - list_stdout[i].write("Stage1 time: {} \n".format(end1_time - start1_time)) - - ###=== RUN STAGE 2 - threads = [] - for i, sample in enumerate(samples): - - cmd_stage2_train = cmd_stage2.replace("CLASS", sample).replace( - "MOD", mods[0] - ) + " {} {} ".format(0, N_split) - cmd_stage2_test = cmd_stage2.replace("CLASS", sample).replace( - "MOD", mods[1] - ) + " {} {} ".format(N_split, N) - print(cmd_stage2_train) - print(cmd_stage2_test) - - if not args.dry: - thread = mp.Process( - target=ntuplizer, - args=(cmd_stage2_train, cmd_stage2_test, list_stdout[i], list_stderr[i]), - ) - thread.start() - threads.append(thread) - - for proc in threads: - proc.join() - - - for i in range(len(list_stdout)): - list_stdout[i].close() - list_stderr[i].close() - - -# ________________________________________________________________________________ -def ntuplizer(cmd_stage2_train, cmd_stage2_test, f_stdout, f_stderr): - - start2_time = time.time() - subprocess.check_call(cmd_stage2_train, shell=True, stdout=f_stdout, stderr=f_stderr) - subprocess.check_call(cmd_stage2_test, shell=True, stdout=f_stdout, stderr=f_stderr) - end2_time = time.time() - f_stdout.write("stage2 time (run only): {} \n".format(end2_time - start2_time)) - - -# _______________________________________________________________________________________ -if __name__ == "__main__": - main() - diff --git a/examples/FCCee/weaver/analysis_inference.py b/examples/FCCee/weaver/analysis_inference.py index b449ffad7a..e11e9d7b2a 100644 --- a/examples/FCCee/weaver/analysis_inference.py +++ b/examples/FCCee/weaver/analysis_inference.py @@ -1,263 +1,85 @@ -#Mandatory: List of processes -processList = { - #prefall2022 samples (generated centrally) - 'p8_ee_ZH_Znunu_Hbb_ecm240':{}, #1030000 events - 'p8_ee_ZH_Znunu_Hcc_ecm240':{}, #1060000 - 'p8_ee_ZH_Znunu_Hss_ecm240':{}, #1060000 - 'p8_ee_ZH_Znunu_Hgg_ecm240':{'fraction':0.5}, #2000000 - 'p8_ee_ZH_Znunu_Huu_ecm240':{'fraction':0.5}, #we take only half sample for uu,dd because they will go into qq label which contains both - 'p8_ee_ZH_Znunu_Hdd_ecm240':{'fraction':0.5}, #and we want for qq same number of jets as other classes; the two files 2080000 events in total, 1040000 each? 
-} +import os +import urllib.request -#Mandatory: Production tag when running over EDM4Hep centrally produced events, this points to the yaml files for getting sample statistics -#prodTag = "FCCee/spring2021/IDEA/" -prodTag = "FCCee/pre_fall2022_training/IDEA/" #for prefall2022 samples +# ____________________________________________________________ +def get_file_path(url, filename): + if os.path.exists(filename): + return os.path.abspath(filename) + else: + urllib.request.urlretrieve(url, os.path.basename(url)) + return os.path.basename(url) -#Optional: output directory, default is local running directory -#outputDir = "/eos/home-a/adelvecc/FCCevaluate/" +# ____________________________________________________________ -#Optional -nCPUS = 8 -runBatch = False -#batchQueue = "longlunch" -#compGroup = "group_u_FCC.local_gen" +## input file needed for unit test in CI +testFile = "https://fccsw.web.cern.ch/fccsw/testsamples/wzp6_ee_nunuH_Hss_ecm240.root" +## latest particle transformer model, trainied on 9M jets in winter2023 samples +model_name = "fccee_flavtagging_edm4hep_wc_v1" -#USER DEFINED CODE +## model files needed for unit testing in CI +url_model_dir = "https://fccsw.web.cern.ch/fccsw/testsamples/jet_flavour_tagging/winter2023/wc_pt_13_01_2022/" +url_preproc = "{}/{}.json".format(url_model_dir, model_name) +url_model = "{}/{}.onnx".format(url_model_dir, model_name) -#Mandatory: RDFanalysis class where the use defines the operations on the TTree -class RDFanalysis(): - #__________________________________________________________ - #Mandatory: analysers funtion to define the analysers to process, please make sure you return the last dataframe, in this example it is df2 - def analysers(df): - - from ROOT import JetFlavourUtils - weaver = JetFlavourUtils.setup_weaver( - "/eos/experiment/fcc/ee/jet_flavour_tagging/pre_fall2022_training/IDEA/selvaggi_2022Oct30/fccee_flavtagging_edm4hep_v2.onnx", #name of the trained model exported - "/eos/experiment/fcc/ee/jet_flavour_tagging/pre_fall2022_training/IDEA/selvaggi_2022Oct30/preprocess_fccee_flavtagging_edm4hep_v2.json", #.json file produced by weaver during training - ( - "pfcand_erel_log", #list of the training variables, - "pfcand_thetarel", #will be used for predictions as well - "pfcand_phirel", - "pfcand_dxy", - "pfcand_dz", - "pfcand_dptdpt", - "pfcand_detadeta", - "pfcand_dphidphi", - "pfcand_dxydxy", - "pfcand_dzdz", - "pfcand_dxydz", - "pfcand_dphidxy", - "pfcand_dlambdadz", - "pfcand_dxyc", - "pfcand_dxyctgtheta", - "pfcand_phic", - "pfcand_phidz", - "pfcand_phictgtheta", - "pfcand_cdz", - "pfcand_cctgtheta", - "pfcand_mtof", - "pfcand_dndx", - "pfcand_charge", - "pfcand_isMu", - "pfcand_isEl", - "pfcand_isChargedHad", - "pfcand_isGamma", - "pfcand_isNeutralHad", - "pfcand_btagSip2dVal", - "pfcand_btagSip2dSig", - "pfcand_btagSip3dVal", - "pfcand_btagSip3dSig", - "pfcand_btagJetDistVal", - "pfcand_btagJetDistSig", - ), - ) - - df2 = ( - df - ### COMPUTE THE VARIABLES FOR INFERENCE OF THE TRAINING MODEL - ### This section should be equal to the one used for training - - ### MC primary vertex - .Define("MC_PrimaryVertex", "FCCAnalyses::MCParticle::get_EventPrimaryVertex(21)( Particle )" ) - - # CLUSTERING - #define the RP px, py, pz and e - .Define("RP_px", "ReconstructedParticle::get_px(ReconstructedParticles)") - .Define("RP_py", "ReconstructedParticle::get_py(ReconstructedParticles)") - .Define("RP_pz", "ReconstructedParticle::get_pz(ReconstructedParticles)") - .Define("RP_e", "ReconstructedParticle::get_e(ReconstructedParticles)") - 
.Define("RP_m", "ReconstructedParticle::get_mass(ReconstructedParticles)") - .Define("RP_q", "ReconstructedParticle::get_charge(ReconstructedParticles)") - - #build pseudo jets with the RP - .Define("pseudo_jets", "JetClusteringUtils::set_pseudoJets(RP_px, RP_py, RP_pz, RP_e)") - #run jet clustering with all reconstructed particles. ee_genkt_algorithm, R=1.5, inclusive clustering, E-scheme - .Define("FCCAnalysesJets_ee_genkt", "JetClustering::clustering_ee_genkt(1.5, 0, 0, 0, 0, -1)(pseudo_jets)") - #get the jets out of the struct - .Define("jets_ee_genkt", "JetClusteringUtils::get_pseudoJets(FCCAnalysesJets_ee_genkt)") - #get the jets constituents out of the struct - .Define("jetconstituents_ee_genkt","JetClusteringUtils::get_constituents(FCCAnalysesJets_ee_genkt)") - - #===== COMPUTE TRAINING FEATURES - - .Define("JetsConstituents", "JetConstituentsUtils::build_constituents_cluster(ReconstructedParticles, jetconstituents_ee_genkt)") #build jet constituents lists +## model files locally stored on /eos +model_dir = "/eos/experiment/fcc/ee/jet_flavour_tagging/winter2023/wc_pt_13_01_2022/" +local_preproc = "{}/{}.json".format(model_dir, model_name) +local_model = "{}/{}.onnx".format(model_dir, model_name) - ### Types of particles - .Alias("MCRecoAssociations0", "MCRecoAssociations#0.index") - .Alias("MCRecoAssociations1", "MCRecoAssociations#1.index") - .Define("JetsConstituents_Pids", "JetConstituentsUtils::get_PIDs_cluster(MCRecoAssociations0, MCRecoAssociations1, ReconstructedParticles, Particle, jetconstituents_ee_genkt)") - - .Define("pfcand_isMu", "JetConstituentsUtils::get_isMu(JetsConstituents_Pids)") - .Define("pfcand_isEl", "JetConstituentsUtils::get_isEl(JetsConstituents_Pids)") - .Define("pfcand_isChargedHad", "JetConstituentsUtils::get_isChargedHad(JetsConstituents_Pids, JetsConstituents)") - .Define("pfcand_isGamma", "JetConstituentsUtils::get_isGamma(JetsConstituents_Pids)") - .Define("pfcand_isNeutralHad", "JetConstituentsUtils::get_isNeutralHad(JetsConstituents_Pids, JetsConstituents)") - - ### Kinematics, displacement, PID +## get local file, else download from url +weaver_preproc = get_file_path(url_preproc, local_preproc) +weaver_model = get_file_path(url_model, local_model) - .Define("pfcand_erel", "JetConstituentsUtils::get_erel_cluster(jets_ee_genkt, JetsConstituents)") - .Define("pfcand_erel_log", "JetConstituentsUtils::get_erel_log_cluster(jets_ee_genkt, JetsConstituents)") - .Define("pfcand_thetarel", "JetConstituentsUtils::get_thetarel_cluster(jets_ee_genkt, JetsConstituents)") - .Define("pfcand_phirel", "JetConstituentsUtils::get_phirel_cluster(jets_ee_genkt, JetsConstituents)") +from addons.ONNXRuntime.python.jetFlavourHelper import JetFlavourHelper +from addons.FastJet.python.jetClusteringHelper import ExclusiveJetClusteringHelper - .Define("pfcand_charge", "JetConstituentsUtils::get_charge(JetsConstituents)") - .Define("pfcand_dndx", "JetConstituentsUtils::get_dndx(JetsConstituents, EFlowTrack_2, EFlowTrack, pfcand_isChargedHad)") - .Define("pfcand_mtof", "JetConstituentsUtils::get_mtof(JetsConstituents, EFlowTrack_L, EFlowTrack, TrackerHits, JetsConstituents_Pids)") +jetFlavourHelper = None +jetClusteringHelper = None - .Define("Bz", "ReconstructedParticle2Track::Bz(ReconstructedParticles, EFlowTrack_1)") +# Mandatory: RDFanalysis class where the use defines the operations on the TTree +class RDFanalysis: + # __________________________________________________________ + # Mandatory: analysers funtion to define the analysers to process, please make sure you 
return the last dataframe, in this example it is df2 + def analysers(df): + global jetClusteringHelper + global jetFlavourHelper - .Define("pfcand_dxy", "JetConstituentsUtils::XPtoPar_dxy(JetsConstituents, EFlowTrack_1, MC_PrimaryVertex, Bz)") - .Define("pfcand_dz", "JetConstituentsUtils::XPtoPar_dz(JetsConstituents, EFlowTrack_1, MC_PrimaryVertex, Bz)") - .Define("pfcand_phi0", "JetConstituentsUtils::XPtoPar_phi(JetsConstituents, EFlowTrack_1, MC_PrimaryVertex, Bz)") - .Define("pfcand_C", "JetConstituentsUtils::XPtoPar_C(JetsConstituents, EFlowTrack_1, MC_PrimaryVertex, Bz)") - .Define("pfcand_ct", "JetConstituentsUtils::XPtoPar_ct(JetsConstituents, EFlowTrack_1, MC_PrimaryVertex, Bz)") + from examples.FCCee.weaver.config import collections, njets - .Define("pfcand_dptdpt", "JetConstituentsUtils::get_omega_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_dxydxy", "JetConstituentsUtils::get_d0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_dzdz", "JetConstituentsUtils::get_z0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_dphidphi", "JetConstituentsUtils::get_phi0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_detadeta", "JetConstituentsUtils::get_tanlambda_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_dxydz", "JetConstituentsUtils::get_d0_z0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_dphidxy", "JetConstituentsUtils::get_phi0_d0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_phidz", "JetConstituentsUtils::get_phi0_z0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_phictgtheta", "JetConstituentsUtils::get_tanlambda_phi0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_dxyctgtheta", "JetConstituentsUtils::get_tanlambda_d0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_dlambdadz", "JetConstituentsUtils::get_tanlambda_z0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_cctgtheta", "JetConstituentsUtils::get_omega_tanlambda_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_phic", "JetConstituentsUtils::get_omega_phi0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_dxyc", "JetConstituentsUtils::get_omega_d0_cov(JetsConstituents, EFlowTrack_1)") - .Define("pfcand_cdz", "JetConstituentsUtils::get_omega_z0_cov(JetsConstituents, EFlowTrack_1)") + tag = "" - .Define("pfcand_btagSip2dVal", "JetConstituentsUtils::get_Sip2dVal_clusterV(jets_ee_genkt, pfcand_dxy, pfcand_phi0, MC_PrimaryVertex, Bz)") - .Define("pfcand_btagSip2dSig", "JetConstituentsUtils::get_Sip2dSig(pfcand_btagSip2dVal, pfcand_dxydxy)") - .Define("pfcand_btagSip3dVal", "JetConstituentsUtils::get_Sip3dVal_clusterV(jets_ee_genkt, pfcand_dxy, pfcand_dz, pfcand_phi0, MC_PrimaryVertex, Bz)") - .Define("pfcand_btagSip3dSig", "JetConstituentsUtils::get_Sip3dSig(pfcand_btagSip3dVal, pfcand_dxydxy, pfcand_dzdz)") - .Define("pfcand_btagJetDistVal", "JetConstituentsUtils::get_JetDistVal_clusterV(jets_ee_genkt, JetsConstituents, pfcand_dxy, pfcand_dz, pfcand_phi0, MC_PrimaryVertex, Bz)") - .Define("pfcand_btagJetDistSig", "JetConstituentsUtils::get_JetDistSig(pfcand_btagJetDistVal, pfcand_dxydxy, pfcand_dzdz)") + ## define jet clustering parameters + jetClusteringHelper = ExclusiveJetClusteringHelper(collections["PFParticles"], njets, tag) - - ##### RUN INFERENCE (fixed by the previous section) + ## run jet clustering + df = jetClusteringHelper.define(df) - .Define( - "MVAVec", - "JetFlavourUtils::get_weights(\ - pfcand_erel_log,\ - pfcand_thetarel,\ - pfcand_phirel,\ - pfcand_dxy,\ - pfcand_dz,\ - pfcand_dptdpt,\ - pfcand_dphidphi,\ - 
pfcand_detadeta,\ - pfcand_dxydxy,\ - pfcand_dzdz,\ - pfcand_dxydz,\ - pfcand_dphidxy,\ - pfcand_dlambdadz,\ - pfcand_dxyc,\ - pfcand_dxyctgtheta,\ - pfcand_phic,\ - pfcand_phidz,\ - pfcand_phictgtheta,\ - pfcand_cdz,\ - pfcand_cctgtheta,\ - pfcand_mtof,\ - pfcand_dndx,\ - pfcand_charge,\ - pfcand_isMu,\ - pfcand_isEl,\ - pfcand_isChargedHad,\ - pfcand_isGamma,\ - pfcand_isNeutralHad,\ - pfcand_btagSip2dVal,\ - pfcand_btagSip2dSig,\ - pfcand_btagSip3dVal,\ - pfcand_btagSip3dSig,\ - pfcand_btagJetDistVal,\ - pfcand_btagJetDistSig\ - )", - ) - - ##### RECAST OUTPUT (get predictions per each sample) - .Define("recojet_isG", "JetFlavourUtils::get_weight(MVAVec, 0)") - .Define("recojet_isQ", "JetFlavourUtils::get_weight(MVAVec, 1)") - .Define("recojet_isS", "JetFlavourUtils::get_weight(MVAVec, 2)") - .Define("recojet_isC", "JetFlavourUtils::get_weight(MVAVec, 3)") - .Define("recojet_isB", "JetFlavourUtils::get_weight(MVAVec, 4)") - - ##### COMPUTE OBSERVABLES FOR ANALYSIS - #if not changing training etc... but only interested in the analysis using a trained model (fixed classes), you should only operate in this section. - #if you're interested in saving variables used for training don't need to compute them again, just - #add them to the list in at the end of the code - - #EXAMPLE + ## define jet flavour tagging parameters - #EVENT LEVEL - .Define("njet", "JetConstituentsUtils::count_jets(JetsConstituents)") + jetFlavourHelper = JetFlavourHelper( + collections, + jetClusteringHelper.jets, + jetClusteringHelper.constituents, + tag, + ) - #JET LEVEL - #jet kinematics - .Define("recojet_pt", "JetClusteringUtils::get_pt(jets_ee_genkt)") - .Define("recojet_e", "JetClusteringUtils::get_e(jets_ee_genkt)") - .Define("recojet_mass", "JetClusteringUtils::get_m(jets_ee_genkt)") - .Define("recojet_phi", "JetClusteringUtils::get_phi(jets_ee_genkt)") - .Define("recojet_theta", "JetClusteringUtils::get_theta(jets_ee_genkt)") + ## define observables for tagger + df = jetFlavourHelper.define(df) - .Define("tlv_jets", "JetConstituentsUtils::compute_tlv_jets(jets_ee_genkt)") - .Define("invariant_mass", "JetConstituentsUtils::InvariantMass(tlv_jets[0], tlv_jets[1])") + ## tagger inference + df = jetFlavourHelper.inference(weaver_preproc, weaver_model, df) - #counting types of particles composing the jet - .Define("nconst", "JetConstituentsUtils::count_consts(JetsConstituents)") - .Define("nmu", "JetConstituentsUtils::count_type(pfcand_isMu)") - .Define("nel", "JetConstituentsUtils::count_type(pfcand_isEl)") - .Define("nchargedhad", "JetConstituentsUtils::count_type(pfcand_isChargedHad)") - .Define("nphoton", "JetConstituentsUtils::count_type(pfcand_isGamma)") - .Define("nneutralhad", "JetConstituentsUtils::count_type(pfcand_isNeutralHad)") + return df - #CONSTITUENTS LEVEL - .Define("pfcand_e", "JetConstituentsUtils::get_e(JetsConstituents)") - .Define("pfcand_pt", "JetConstituentsUtils::get_pt(JetsConstituents)") - .Define("pfcand_theta", "JetConstituentsUtils::get_theta(JetsConstituents)") - .Define("pfcand_phi", "JetConstituentsUtils::get_phi(JetsConstituents)") + # __________________________________________________________ + # Mandatory: output function, please make sure you return the branchlist as a python list + def output(): - ) + ## outputs jet properties + branchList = jetClusteringHelper.outputBranches() - return df2 + ## outputs jet scores and constituent breakdown + branchList += jetFlavourHelper.outputBranches() - #__________________________________________________________ - #SAVE PREDICTIONS & 
OBSERVABLES FOR ANALYSIS - #Mandatory: output function, please make sure you return the branchlist as a python list - def output(): - branchList = [ - #predictions - 'recojet_isG', 'recojet_isQ', 'recojet_isS', 'recojet_isC', 'recojet_isB', - #observables - 'recojet_mass', 'recojet_e', 'recojet_pt', - 'invariant_mass', - 'nconst', 'nchargedhad', - 'pfcand_e', 'pfcand_pt', 'pfcand_phi', - 'pfcand_erel', - 'pfcand_erel_log', - ] return branchList diff --git a/examples/FCCee/weaver/config.py b/examples/FCCee/weaver/config.py new file mode 100644 index 0000000000..916a3970d3 --- /dev/null +++ b/examples/FCCee/weaver/config.py @@ -0,0 +1,456 @@ +""" +this configuration file contains the: +- list of flavors to be considered +- reconstruction sequence stored as two dicts: definition and alias +- the list of variables used in the tagger as well as their range for validation plotting +""" + +## number of jets for exclusive clustering +njets = 2 + +## name of collections in EDM root files +collections = { + "GenParticles": "Particle", + "PFParticles": "ReconstructedParticles", + "PFTracks": "EFlowTrack", + "PFPhotons": "EFlowPhoton", + "PFNeutralHadrons": "EFlowNeutralHadron", + "TrackState": "EFlowTrack_1", + "TrackerHits": "TrackerHits", + "CalorimeterHits": "CalorimeterHits", + "dNdx": "EFlowTrack_2", + "PathLength": "EFlowTrack_L", + "Bz": "magFieldBz", +} + +#### list of flavors f = g, q, c, s, ...(will look for input file name ccontaining "[Hff]") +flavors = ["g", "q", "s", "c", "b", "tau"] + +## define here the branches to be stored in the output root files in addition to the predefined one +## only the name of the var is used here, the metadata is used in stage_plots +variables_pfcand = { + "pfcand_erel_log": { + "name": "pfcand_erel_log", + "title": "log(E_{i}/E_{jet})", + "bin": 100, + "xmin": -3, + "xmax": 0, + "scale": "log", + }, + "pfcand_thetarel": { + "name": "pfcand_thetarel", + "title": "#theta_{rel}", + "bin": 100, + "xmin": 0.0, + "xmax": 3.0, + "scale": "lin", + }, + "pfcand_phirel": { + "name": "pfcand_phirel", + "title": "#phi_{rel}", + "bin": 100, + "xmin": -3.14, + "xmax": 3.14, + "scale": "lin", + }, + "pfcand_dptdpt": { + "name": "pfcand_dptdpt", + "title": "#sigma(#omega)^{2}", + "bin": 100, + "xmin": 0.0, + "xmax": 2e-09, + "scale": "log", + }, + "pfcand_detadeta": { + "name": "pfcand_detadeta", + "title": "#sigma(tan(#lambda))^{2}", + "bin": 100, + "xmin": 0.0, + "xmax": 0.02, + "scale": "log", + }, + "pfcand_dphidphi": { + "name": "pfcand_dphidphi", + "title": "#sigma(#phi))^{2}", + "bin": 100, + "xmin": 0.0, + "xmax": 0.0015, + "scale": "log", + }, + "pfcand_dxydxy": { + "name": "pfcand_dxydxy", + "title": "#sigma(d_{xy}))^{2}", + "bin": 100, + "xmin": 0.0, + "xmax": 0.20, + "scale": "log", + }, + "pfcand_dzdz": { + "name": "pfcand_dzdz", + "title": "#sigma(d_{z}))^{2}", + "bin": 100, + "xmin": 0.0, + "xmax": 0.50, + "scale": "log", + }, + "pfcand_dxydz": { + "name": "pfcand_dxydz", + "title": "C(d_{xy},d_{z})", + "bin": 100, + "xmin": -10, + "xmax": 10, + "scale": "log", + }, + "pfcand_dphidxy": { + "name": "pfcand_dphidxy", + "title": "C(#phi,d_{z})", + "bin": 100, + "xmin": -0.1, + "xmax": 0, + "scale": "log", + }, + "pfcand_dlambdadz": { + "name": "pfcand_dlambdadz", + "title": "C(tan(#lambda),d_{z})", + "bin": 100, + "xmin": -0.5, + "xmax": 0.1, + "scale": "log", + }, + "pfcand_dxyc": { + "name": "pfcand_dxyc", + "title": "C(#omega,d_{xy})", + "bin": 100, + "xmin": -0.2, + "xmax": 0.1, + "scale": "log", + }, + "pfcand_dxyctgtheta": { + "name": 
"pfcand_dxyctgtheta", + "title": "C(tan(#lambda),d_{xy})", + "bin": 100, + "xmin": -0.025, + "xmax": 0.025, + "scale": "log", + }, + "pfcand_phic": { + "name": "pfcand_phic", + "title": "C(#omega,#phi)", + "bin": 100, + "xmin": -1e-06, + "xmax": 1e-06, + "scale": "log", + }, + "pfcand_phidz": { + "name": "pfcand_phidz", + "title": "C(#phi,d_{z})", + "bin": 100, + "xmin": -0.05, + "xmax": 0.05, + "scale": "log", + }, + "pfcand_phictgtheta": { + "name": "pfcand_phictgtheta", + "title": "C(tan(#lambda),#phi)", + "bin": 100, + "xmin": -0.1e-03, + "xmax": 0.5e03, + "scale": "log", + }, + "pfcand_cdz": { + "name": "pfcand_cdz", + "title": "C(#omega,d_{z})", + "bin": 100, + "xmin": -0.5e-03, + "xmax": 0.1e-03, + "scale": "log", + }, + "pfcand_cctgtheta": { + "name": "pfcand_cctgtheta", + "title": "C(#omega, tan(#lambda))", + "bin": 100, + "xmin": -1e-06, + "xmax": 20e-06, + "scale": "log", + }, + "pfcand_mtof": { + "name": "pfcand_mtof", + "title": "m_{ToF} [GeV]", + "bin": 100, + "xmin": 0, + "xmax": 1.5, + "scale": "lin", + }, + "pfcand_dndx": { + "name": "pfcand_dndx", + "title": "dN/dx [mm^{-1}]", + "bin": 100, + "xmin": 0, + "xmax": 5, + "scale": "lin", + }, + "pfcand_charge": { + "name": "pfcand_charge", + "title": "Q", + "bin": 2, + "xmin": -0.5, + "xmax": 1.5, + "scale": "lin", + }, + "pfcand_isMu": { + "name": "pfcand_isMu", + "title": "is muon", + "bin": 2, + "xmin": -0.5, + "xmax": 1.5, + "scale": "lin", + }, + "pfcand_isEl": { + "name": "pfcand_isEl", + "title": "is electron", + "bin": 2, + "xmin": -0.5, + "xmax": 1.5, + "scale": "lin", + }, + "pfcand_isChargedHad": { + "name": "pfcand_isChargedHad", + "title": "is charged hadron", + "bin": 2, + "xmin": -0.5, + "xmax": 1.5, + "scale": "lin", + }, + "pfcand_isGamma": { + "name": "pfcand_isGamma", + "title": "is photon", + "bin": 2, + "xmin": -0.5, + "xmax": 1.5, + "scale": "lin", + }, + "pfcand_isNeutralHad": { + "name": "pfcand_isNeutralHad", + "title": "is neutral hadron", + "bin": 2, + "xmin": -0.5, + "xmax": 1.5, + "scale": "lin", + }, + "pfcand_type": { + "name": "pfcand_type", + "title": "PDG code", + "bin": 10000, + "xmin": -5000, + "xmax": 5000, + "scale": "lin", + }, + "pfcand_dxy": { + "name": "pfcand_dxy", + "title": "d_{xy} [mm]", + "bin": 100, + "xmin": -0.5, + "xmax": 0.5, + "scale": "log", + }, + "pfcand_dz": { + "name": "pfcand_dz", + "title": "d_{z} [mm]", + "bin": 100, + "xmin": -0.5, + "xmax": 0.5, + "scale": "log", + }, + "pfcand_btagSip2dVal": { + "name": "pfcand_btagSip2dVal", + "title": "2D signed IP [mm]", + "bin": 100, + "xmin": -0.5, + "xmax": 5, + "scale": "log", + }, + "pfcand_btagSip2dSig": { + "name": "pfcand_btagSip2dSig", + "title": "2D signed IP significance", + "bin": 100, + "xmin": -10, + "xmax": 10, + "scale": "log", + }, + "pfcand_btagSip3dVal": { + "name": "pfcand_btagSip3dVal", + "title": "3D signed IP [mm]", + "bin": 100, + "xmin": -0.5, + "xmax": 5, + "scale": "log", + }, + "pfcand_btagSip3dSig": { + "name": "pfcand_btagSip3dSig", + "title": "3D signed IP significance", + "bin": 100, + "xmin": -8, + "xmax": 10, + "scale": "log", + }, + "pfcand_btagJetDistVal": { + "name": "pfcand_btagJetDistVal", + "title": "distance to jet [mm]", + "bin": 100, + "xmin": 0, + "xmax": 5, + "scale": "log", + }, + "pfcand_btagJetDistSig": { + "name": "pfcand_btagJetDistSig", + "title": "distance to jet (significance)", + "bin": 100, + "xmin": 0, + "xmax": 10, + "scale": "log", + }, + ### other pf cand observer variables + "pfcand_e": { + "name": "pfcand_e", + "title": "E [GeV]", + "bin": 100, + "xmin": 0, + "xmax": 
100, + "scale": "log", + }, + "pfcand_p": { + "name": "pfcand_p", + "title": "p [GeV]", + "bin": 100, + "xmin": 0, + "xmax": 100, + "scale": "log", + }, + "pfcand_theta": { + "name": "pfcand_theta", + "title": "#theta", + "bin": 100, + "xmin": 0, + "xmax": 3.14, + "scale": "lin", + }, + "pfcand_phi": { + "name": "pfcand_phi", + "title": "#phi", + "bin": 100, + "xmin": -3.14, + "xmax": 3.14, + "scale": "lin", + }, +} + +variables_jet = { + ### jet based variables + "jet_p": { + "name": "jet_p", + "title": "p_{jet} [GeV]", + "bin": 100, + "xmin": 0, + "xmax": 100.0, + "scale": "lin", + }, + "jet_e": { + "name": "jet_e", + "title": "E_{jet} [GeV]", + "bin": 100, + "xmin": 0, + "xmax": 100.0, + "scale": "lin", + }, + "jet_mass": { + "name": "jet_mass", + "title": "m_{jet} [GeV]", + "bin": 100, + "xmin": 0, + "xmax": 25, + "scale": "lin", + }, + "jet_phi": { + "name": "jet_phi", + "title": "#phi_{jet}", + "bin": 100, + "xmin": -3.14, + "xmax": 3.14, + "scale": "lin", + }, + "jet_theta": { + "name": "jet_theta", + "title": "#theta_{jet}", + "bin": 100, + "xmin": 0, + "xmax": 3.14, + "scale": "lin", + }, + "jet_nconst": { + "name": "jet_nconst", + "title": "N_{const}^{jet}", + "bin": 100, + "xmin": 0, + "xmax": 100, + "scale": "log", + }, + "jet_nmu": { + "name": "jet_nmu", + "title": "N_{#mu}^{jet}", + "bin": 5, + "xmin": 0, + "xmax": 5, + "scale": "log", + }, + "jet_nel": { + "name": "N_{el}^{jet}", + "title": "", + "bin": 5, + "xmin": 0, + "xmax": 5, + "scale": "log", + }, + "jet_nchad": { + "name": "jet_nchad", + "title": "N_{ch.had}^{jet}", + "bin": 50, + "xmin": 0, + "xmax": 50, + "scale": "log", + }, + "jet_ngamma": { + "name": "jet_ngamma", + "title": "N_{#gamma}^{jet}", + "bin": 50, + "xmin": 0, + "xmax": 50, + "scale": "log", + }, + "jet_nnhad": { + "name": "jet_nnhad", + "title": "N_{neutr. had}^{jet}", + "bin": 20, + "xmin": 0, + "xmax": 20, + "scale": "log", + }, +} + +variables_event = { + "event_invariant_mass": { + "name": "event_invariant_mass", + "title": "m_{jj} [GeV]", + "bin": 100, + "xmin": 0, + "xmax": 200, + "scale": "lin", + }, + "event_njet": { + "name": "event_njet", + "title": "N_{j} ", + "bin": 11, + "xmin": -0.5, + "xmax": 10, + "scale": "lin", + }, +} diff --git a/examples/FCCee/weaver/plot_rocs.py b/examples/FCCee/weaver/plot_rocs.py new file mode 100644 index 0000000000..2f69792de0 --- /dev/null +++ b/examples/FCCee/weaver/plot_rocs.py @@ -0,0 +1,408 @@ +import ROOT +import os +import argparse +import numpy as np +import glob +import sys +from collections import OrderedDict +from copy import deepcopy +from examples.FCCee.weaver.config import variables_pfcand, variables_jet, flavors +import matplotlib.pyplot as plt + +plt.rcParams["mathtext.fontset"] = "stix" +plt.rcParams["font.family"] = "STIXGeneral" +plt.rcParams["axes.labelweight"] = "bold" + +plt.gcf().subplots_adjust(bottom=0.15) + + +# compute binary discriminant +ROOT.gInterpreter.Declare( + """ +ROOT::VecOps::RVec binary_discriminant(ROOT::VecOps::RVec score_s, ROOT::VecOps::RVec score_b) +{ + ROOT::VecOps::RVec out; + for (int i=0; i 0) ? score_s.at(i) / den : 0; + //std::cout<= nfiles_max: + continue + files.append(fname) + + # print(files) + samples[proc][f] = Sample(files, "events", f, proc) + + roc_param = RocParam(2, 100) # ndecades, nbins + plot_param = PlotParams( + [Text("FCC-ee simulation (IDEA)", (0.75, 1.03), "bold", 13)], + ((0.3, 1.0), (0.001, 1.0)), + ("jet tagging efficiency", "jet misid. 
probability"), + ("linear", "log"), + ) + + ctag_cfg = { + "sig": "c", + "bkg": ["g", "q", "b"], + "samples": samples, + "variants": processes, + "param_roc": roc_param, + "param_plot": plot_param, + } + + btag_cfg = deepcopy(ctag_cfg) + btag_cfg["sig"] = "b" + btag_cfg["bkg"] = ["g", "q", "c"] + + stag_cfg = deepcopy(ctag_cfg) + stag_cfg["sig"] = "s" + stag_cfg["bkg"] = ["g", "q", "c", "b"] + stag_cfg["param_plot"].ranges = ((0.0, 1.0), (0.001, 1.0)) + + gtag_cfg = deepcopy(ctag_cfg) + gtag_cfg["sig"] = "g" + gtag_cfg["bkg"] = ["q", "s", "c", "b"] + gtag_cfg["param_plot"].ranges = ((0.0, 1.0), (0.001, 1.0)) + + roc_plot(ctag_cfg) + roc_plot(btag_cfg) + roc_plot(stag_cfg) + roc_plot(gtag_cfg) + + +# _______________________________________________________________________________ +class Sample: + def __init__(self, files, treename, flavor, label): + self.files = files + self.treename = treename + self.flavor = flavor + self.label = label + + +# _______________________________________________________________________________ +class Process: + def __init__(self, procname, name, label, dir): + self.procname = procname + self.name = name + self.label = label + self.dir = dir + + +# _______________________________________________________________________________ +class RocParam: + def __init__(self, ndec, nbins): + self.ndec = ndec # how many decades for log scale + self.nbins = nbins + + +# _______________________________________________________________________________ +class ROC: + def __init__(self, name, sample_s, sample_b, param, color, style): + self.name = name + self.sample_s = sample_s + self.sample_b = sample_b + self.range = param.ndec + self.nbins = param.nbins + self.color = color + self.style = style + self.x = [] + self.y = [] + + # self.name = "{}_{}".format(self.sample_s.flavor, self.sample_b.flavor) + self.label = "{} vs {} ({})".format(self.sample_s.flavor, self.sample_b.flavor, self.sample_s.label.label) + ## initialize histo rdataframe objects + self.histos = [] + df_s = ROOT.RDataFrame(self.sample_s.treename, self.sample_s.files) + df_b = ROOT.RDataFrame(self.sample_b.treename, self.sample_b.files) + + ## dict of rdf histos vs cut values + self.dh_s = dfhs( + df_s, + self.sample_s.flavor, + self.sample_b.flavor, + self.range, + self.nbins, + "sig_{}".format(self.name), + ) + self.dh_b = dfhs( + df_b, + self.sample_s.flavor, + self.sample_b.flavor, + self.range, + self.nbins, + "bkg_{}".format(self.name), + ) + + self.dhs = list(self.dh_s.values()) + list(self.dh_b.values()) + + def get_roc(self): + out_root = ROOT.TFile("{}.root".format(self.name), "RECREATE") + for cut, hist in self.dh_s.items(): + hist.Write() + for cut, hist in self.dh_b.items(): + hist.Write() + + x, y = [], [] + cuts = list(self.dh_s.keys()) + + def integral(h): + return float(h.Integral(0, h.GetNbinsX() + 1)) + + sum_s = integral(self.dh_s[cuts[0]]) + sum_b = integral(self.dh_b[cuts[0]]) + + if sum_s == 0 or sum_b == 0: + sys.exit("ERROR: histograms are empty...") + + for cut in cuts: + x.append(integral(self.dh_s[cut]) / sum_s) + y.append(integral(self.dh_b[cut]) / sum_b) + + self.x = x + self.y = y + + return x, y + + +# _______________________________________________________________________________ +class Text: + def __init__(self, text, location, weight, size): + self.text = text + self.location = location + self.weight = weight + self.size = size + + +# _______________________________________________________________________________ +class PlotParams: + def __init__(self, texts, ranges, axlabels, 
scales): + self.texts = texts + self.ranges = ranges + self.axlabels = axlabels + self.scales = scales + + +# _______________________________________________________________________________ +class Graph: + def __init__(self, rocs, params, fig_file): + self.rocs = rocs + self.texts = params.texts + self.ranges = params.ranges + self.titles = params.axlabels + self.scales = params.scales + self.fig_file = fig_file + + fig, ax = plt.subplots() + + ## plot curves + + print(fig_file) + for roc in rocs: + print( + roc.name, + roc.sample_s.label.label, + roc.sample_b.label.label, + ) + for x, y in zip(roc.x, roc.y): + print(x, y) + + ax.plot( + roc.x, + roc.y, + linestyle=roc.style, + color=roc.color, + label="{}".format(roc.label), + linewidth=3, + ) + + # add text to plot + for text in self.texts: + ax.text( + text.location[0], + text.location[1], + text.text, + verticalalignment="center", + horizontalalignment="center", + transform=ax.transAxes, + weight=text.weight, + fontsize=text.size, + ) + + handles, labels = ax.get_legend_handles_labels() + ax.legend( + labels=labels, + frameon=False, + # loc=self.leg_loc, + fontsize=14, + ) + + ax.grid(linestyle="dashed") + ax.tick_params(axis="both", labelsize=14) + ax.set_xlabel(self.titles[0], fontsize=14) + ax.set_ylabel(self.titles[1], fontsize=14) + ax.set_xscale(self.scales[0]) + ax.set_yscale(self.scales[1]) + + ax.set_xlim(self.ranges[0][0], self.ranges[0][1]) + ax.set_ylim(self.ranges[1][0], self.ranges[1][1]) + + fig.tight_layout() + fig.savefig(fig_file) + + +# _______________________________________________________________________________ +def roc_plot(cfg): + + colors = ["black", "red", "blue", "purple", "green"] + styles = ["-", "--", "-.", "."] + + rocs = [] + + procstr = "" + for ip, proc in enumerate(cfg["variants"]): + sig_f = cfg["sig"] + procstr += "_{}".format(proc.name) + for ib, bkg_f in enumerate(cfg["bkg"]): + name = "{}{}_{}".format(sig_f, bkg_f, proc.name) + rocs.append( + ROC( + name, + cfg["samples"][proc][sig_f], + cfg["samples"][proc][bkg_f], + cfg["param_roc"], + colors[ib], + styles[ip], + ) + ) + + dh_list = [] + for roc in rocs: + dh_list += roc.dhs + + ROOT.RDF.RunGraphs(dh_list) + + for roc in rocs: + roc.get_roc() + + plot_name = "{}tagging{}".format(cfg["sig"], procstr) + plot = Graph(rocs, cfg["param_plot"], "plots/{}.png".format(plot_name)) + + +# ______________________________________________________________________________ +def dfhs(df, fs, fb, m, nbins, label): + + lin_array = np.arange(0.0, m + float(m) / nbins, float(m) / nbins) + exp_array = np.power(10.0, lin_array) + + a = -1.0 / (np.power(10.0, m) - 1) + b = np.power(10.0, m) / (np.power(10.0, m) - 1) + cutvals = a * exp_array + b + cutvals.sort() + + score_s = "recojet_is{}".format(fs.upper()) + score_b = "recojet_is{}".format(fb.upper()) + + binary_discr_var = "d_{}{}".format(fs, fb) + binary_discr_label = "D({},{})".format(fs, fb) + binary_discr_func = "binary_discriminant({}, {})".format(score_s, score_b) + + # print(cutvals) + df_dict = OrderedDict() + dh_dict = OrderedDict() + + print("producing roc curve: {} vs {} -- {}".format(fs, fb, label)) + df = df.Define(binary_discr_var, binary_discr_func) + + for i, cut in enumerate(cutvals): + + cut_str = "{:.0f}".format(1e5 * cut) + if i == 0: + cut_str = "00000" + + filter_expr = "{} > {}".format(binary_discr_var, cut) + fcoll_title = "{} > {:.4f}".format(binary_discr_label, cut) + fcoll_str = "{}_{}".format(binary_discr_var, cut_str) + + # define filter from previous iteration in the loop + if i == 0: 
+ df_dict[cut] = df.Define(fcoll_str, "{}[{}]".format(binary_discr_var, filter_expr)) + else: + df_dict[cut] = df_dict[cutvals[i - 1]].Define(fcoll_str, "{}[{}]".format(binary_discr_var, filter_expr)) + + dh_dict[cut] = df_dict[cut].Histo1D( + ( + "h_{}_{}".format(fcoll_str, label), + ";{};N_{{Events}}".format(binary_discr_label), + nbins, + 0.0, + 1.0, + ), + fcoll_str, + ) + return dh_dict + + +# _______________________________________________________________________________________ +if __name__ == "__main__": + main() diff --git a/examples/FCCee/weaver/stage1.py b/examples/FCCee/weaver/stage1.py index aeb35a483e..fa3d96773c 100644 --- a/examples/FCCee/weaver/stage1.py +++ b/examples/FCCee/weaver/stage1.py @@ -1,193 +1,54 @@ -processList = { - #prefall2022 samples (generated centrally) - 'p8_ee_ZH_Znunu_Hbb_ecm240':{}, #1030000 events - 'p8_ee_ZH_Znunu_Hcc_ecm240':{}, #1060000 - 'p8_ee_ZH_Znunu_Hss_ecm240':{}, #1060000 - 'p8_ee_ZH_Znunu_Hgg_ecm240':{'fraction':0.5}, #2000000 - 'p8_ee_ZH_Znunu_Huu_ecm240':{'fraction':0.5}, #we take only half sample for uu,dd because they will go into qq label which contains both - 'p8_ee_ZH_Znunu_Hdd_ecm240':{'fraction':0.5}, #and we want for qq same number of jets as other classes; the two files 2080000 events in total, 1040000 each? -} - - -#prodTag = "FCCee/spring2021/IDEA/" #for spring2022 samples -prodTag = "FCCee/pre_fall2022_training/IDEA/" #for prefall2022 samples - -#outputDir = "" - -#Optional: ncpus, default is 4 -nCPUS = 8 - - -class RDFanalysis(): - +from examples.FCCee.weaver.config import ( + variables_pfcand, + variables_jet, + variables_event, +) + +from addons.ONNXRuntime.python.jetFlavourHelper import JetFlavourHelper +from addons.FastJet.python.jetClusteringHelper import ExclusiveJetClusteringHelper + +jetFlavourHelper = None +jetClusteringHelper = None + +# Mandatory: RDFanalysis class where the use defines the operations on the TTree +class RDFanalysis: + # __________________________________________________________ + # Mandatory: analysers funtion to define the analysers to process, please make sure you return the last dataframe, in this example it is df2 def analysers(df): - - df2 = ( - df + global jetClusteringHelper + global jetFlavourHelper - #===== VERTEX - - #MC primary vertex - .Define("MC_PrimaryVertex", "FCCAnalyses::MCParticle::get_EventPrimaryVertex(21)( Particle )" ) + from examples.FCCee.weaver.config import collections, njets - #===== CLUSTERING - #define the RP px, py, pz and e - .Define("RP_px", "ReconstructedParticle::get_px(ReconstructedParticles)") - .Define("RP_py", "ReconstructedParticle::get_py(ReconstructedParticles)") - .Define("RP_pz", "ReconstructedParticle::get_pz(ReconstructedParticles)") - .Define("RP_e", "ReconstructedParticle::get_e(ReconstructedParticles)") - .Define("RP_m", "ReconstructedParticle::get_mass(ReconstructedParticles)") - .Define("RP_q", "ReconstructedParticle::get_charge(ReconstructedParticles)") - - #build pseudo jets with the RP, using the interface that takes px,py,pz,E - #.Define("pseudo_jets", "JetClusteringUtils::set_pseudoJets_xyzm(RP_px, RP_py, RP_pz, RP_m)") - .Define("pseudo_jets", "JetClusteringUtils::set_pseudoJets(RP_px, RP_py, RP_pz, RP_e)") - #run jet clustering with all reconstructed particles. 
ee_genkt_algorithm, R=1.5, inclusive clustering, E-scheme - .Define("FCCAnalysesJets_ee_genkt", "JetClustering::clustering_ee_genkt(1.5, 0, 0, 0, 0, -1)(pseudo_jets)") - #get the jets out of the struct - .Define("jets_ee_genkt", "JetClusteringUtils::get_pseudoJets(FCCAnalysesJets_ee_genkt)") - #get the jets constituents out of the struct - .Define("jetconstituents_ee_genkt","JetClusteringUtils::get_constituents(FCCAnalysesJets_ee_genkt)") + ## define jet clustering parameters + jetClusteringHelper = ExclusiveJetClusteringHelper(collections["PFParticles"], njets) + ## run jet clustering + df = jetClusteringHelper.define(df) - #===== OBSERVABLES - #JET LEVEL - ###.Define("Jets_px", "JetClusteringUtils::get_px(jets_ee_genkt)") #jets_ee_genkt_px - ###.Define("Jets_py", "JetClusteringUtils::get_py(jets_ee_genkt)") - ###.Define("Jets_pz", "JetClusteringUtils::get_pz(jets_ee_genkt)") - - .Define("Jets_pt", "JetClusteringUtils::get_pt(jets_ee_genkt)") - .Define("Jets_e", "JetClusteringUtils::get_e(jets_ee_genkt)") - .Define("Jets_mass", "JetClusteringUtils::get_m(jets_ee_genkt)") - .Define("Jets_phi", "JetClusteringUtils::get_phi(jets_ee_genkt)") - .Define("Jets_theta", "JetClusteringUtils::get_theta(jets_ee_genkt)") - - #CONSTITUENT LEVEL - .Define("JetsConstituents", "JetConstituentsUtils::build_constituents_cluster(ReconstructedParticles, jetconstituents_ee_genkt)") #build jet constituents lists - - #getting the types of particles - .Alias("MCRecoAssociations0", "MCRecoAssociations#0.index") - .Alias("MCRecoAssociations1", "MCRecoAssociations#1.index") - .Define("JetsConstituents_Pids", "JetConstituentsUtils::get_PIDs_cluster(MCRecoAssociations0, MCRecoAssociations1, ReconstructedParticles, Particle, jetconstituents_ee_genkt)") - .Define("JetsConstituents_isMu", "JetConstituentsUtils::get_isMu(JetsConstituents_Pids)") - .Define("JetsConstituents_isEl", "JetConstituentsUtils::get_isEl(JetsConstituents_Pids)") - .Define("JetsConstituents_isChargedHad", "JetConstituentsUtils::get_isChargedHad(JetsConstituents_Pids, JetsConstituents)") - .Define("JetsConstituents_isGamma", "JetConstituentsUtils::get_isGamma(JetsConstituents_Pids)") - .Define("JetsConstituents_isNeutralHad", "JetConstituentsUtils::get_isNeutralHad(JetsConstituents_Pids, JetsConstituents)") - - #kinematics, displacement, PID - .Define("JetsConstituents_e", "JetConstituentsUtils::get_e(JetsConstituents)") - .Define("JetsConstituents_pt", "JetConstituentsUtils::get_pt(JetsConstituents)") - .Define("JetsConstituents_theta", "JetConstituentsUtils::get_theta(JetsConstituents)") - .Define("JetsConstituents_phi", "JetConstituentsUtils::get_phi(JetsConstituents)") - .Define("JetsConstituents_charge", "JetConstituentsUtils::get_charge(JetsConstituents)") - - .Define("JetsConstituents_erel", "JetConstituentsUtils::get_erel_cluster(jets_ee_genkt, JetsConstituents)") - .Define("JetsConstituents_erel_log", "JetConstituentsUtils::get_erel_log_cluster(jets_ee_genkt, JetsConstituents)") - .Define("JetsConstituents_thetarel", "JetConstituentsUtils::get_thetarel_cluster(jets_ee_genkt, JetsConstituents)") - .Define("JetsConstituents_phirel", "JetConstituentsUtils::get_phirel_cluster(jets_ee_genkt, JetsConstituents)") - - .Define("JetsConstituents_dndx", "JetConstituentsUtils::get_dndx(JetsConstituents, EFlowTrack_2, EFlowTrack, JetsConstituents_isChargedHad)") - .Define("JetsConstituents_mtof", "JetConstituentsUtils::get_mtof(JetsConstituents, EFlowTrack_L, EFlowTrack, TrackerHits, JetsConstituents_Pids)") - - .Define("JetsConstituents_d0_wrt0", 
"JetConstituentsUtils::get_d0(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_z0_wrt0", "JetConstituentsUtils::get_z0(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_phi0_wrt0", "JetConstituentsUtils::get_phi0(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_omega_wrt0", "JetConstituentsUtils::get_omega(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_tanlambda_wrt0", "JetConstituentsUtils::get_tanLambda(JetsConstituents, EFlowTrack_1)") - - .Define("JetsConstituents_Bz", "JetConstituentsUtils::get_Bz(JetsConstituents, EFlowTrack_1)") - .Define("Bz", "ReconstructedParticle2Track::Bz(ReconstructedParticles, EFlowTrack_1)") - - #.Define("JetsConstituents_Par", "ReconstructedParticle2Track::XPtoPar(ReconstructedParticles, EFlowTrack_1, MC_PrimaryVertex, Bz)") - #.Define("JetsConstituents_dxy", "JetConstituentsUtils::XPtoPar_dxy(JetsConstituents_Par)") - #.Define("JetsConstituents_dxy", "JetConstituentsUtils::XPtoPar_dxy(JetsConstituents_Par)") - #.Define("JetsConstituents_dz", "JetConstituentsUtils::XPtoPar_dz(JetsConstituents_Par)") - #.Define("JetsConstituents_phi0", "JetConstituentsUtils::XPtoPar_phi0(JetsConstituents_Par)") - #.Define("JetsConstituents_C", "JetConstituentsUtils::XPtoPar_C(JetsConstituents_Par)") - #.Define("JetsConstituents_ct", "JetConstituentsUtils::XPtoPar_ct(JetsConstituents_Par)") + ## define jet flavour tagging parameters + jetFlavourHelper = JetFlavourHelper( + collections, + jetClusteringHelper.jets, + jetClusteringHelper.constituents, + ) - .Define("JetsConstituents_dxy", "JetConstituentsUtils::XPtoPar_dxy(JetsConstituents, EFlowTrack_1, MC_PrimaryVertex, Bz)") - .Define("JetsConstituents_dz", "JetConstituentsUtils::XPtoPar_dz(JetsConstituents, EFlowTrack_1, MC_PrimaryVertex, Bz)") - .Define("JetsConstituents_phi0", "JetConstituentsUtils::XPtoPar_phi(JetsConstituents, EFlowTrack_1, MC_PrimaryVertex, Bz)") - .Define("JetsConstituents_C", "JetConstituentsUtils::XPtoPar_C(JetsConstituents, EFlowTrack_1, MC_PrimaryVertex, Bz)") - .Define("JetsConstituents_ct", "JetConstituentsUtils::XPtoPar_ct(JetsConstituents, EFlowTrack_1, MC_PrimaryVertex, Bz)") + ## define observables for tagger + df = jetFlavourHelper.define(df) - .Define("JetsConstituents_omega_cov", "JetConstituentsUtils::get_omega_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_d0_cov", "JetConstituentsUtils::get_d0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_z0_cov", "JetConstituentsUtils::get_z0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_phi0_cov", "JetConstituentsUtils::get_phi0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_tanlambda_cov", "JetConstituentsUtils::get_tanlambda_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_d0_z0_cov", "JetConstituentsUtils::get_d0_z0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_phi0_d0_cov", "JetConstituentsUtils::get_phi0_d0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_phi0_z0_cov", "JetConstituentsUtils::get_phi0_z0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_tanlambda_phi0_cov", "JetConstituentsUtils::get_tanlambda_phi0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_tanlambda_d0_cov", "JetConstituentsUtils::get_tanlambda_d0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_tanlambda_z0_cov", "JetConstituentsUtils::get_tanlambda_z0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_omega_tanlambda_cov", 
"JetConstituentsUtils::get_omega_tanlambda_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_omega_phi0_cov", "JetConstituentsUtils::get_omega_phi0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_omega_d0_cov", "JetConstituentsUtils::get_omega_d0_cov(JetsConstituents, EFlowTrack_1)") - .Define("JetsConstituents_omega_z0_cov", "JetConstituentsUtils::get_omega_z0_cov(JetsConstituents, EFlowTrack_1)") - - .Define("JetsConstituents_Sip2dVal", "JetConstituentsUtils::get_Sip2dVal_clusterV(jets_ee_genkt, JetsConstituents_dxy, JetsConstituents_phi0, MC_PrimaryVertex, Bz)") - .Define("JetsConstituents_Sip2dSig", "JetConstituentsUtils::get_Sip2dSig(JetsConstituents_Sip2dVal, JetsConstituents_d0_cov)") - .Define("JetsConstituents_Sip3dVal", "JetConstituentsUtils::get_Sip3dVal_clusterV(jets_ee_genkt, JetsConstituents_dxy, JetsConstituents_dz, JetsConstituents_phi0, MC_PrimaryVertex, Bz)") - .Define("JetsConstituents_Sip3dSig", "JetConstituentsUtils::get_Sip3dSig(JetsConstituents_Sip3dVal, JetsConstituents_d0_cov, JetsConstituents_z0_cov)") - .Define("JetsConstituents_JetDistVal", "JetConstituentsUtils::get_JetDistVal_clusterV(jets_ee_genkt, JetsConstituents, JetsConstituents_dxy, JetsConstituents_dz, JetsConstituents_phi0, MC_PrimaryVertex, Bz)") - .Define("JetsConstituents_JetDistSig", "JetConstituentsUtils::get_JetDistSig(JetsConstituents_JetDistVal, JetsConstituents_d0_cov, JetsConstituents_z0_cov)") + ## compute invariant mass of two leading jets + df = df.Define("jet_p4", "JetConstituentsUtils::compute_tlv_jets({})".format(jetClusteringHelper.jets)) + df = df.Define("event_invariant_mass", "JetConstituentsUtils::InvariantMass(jet_p4[0], jet_p4[1])") - #counting the types of particles per jet - .Define("njet", "JetConstituentsUtils::count_jets(JetsConstituents)") - .Define("nconst", "JetConstituentsUtils::count_consts(JetsConstituents)") - .Define("nmu", "JetConstituentsUtils::count_type(JetsConstituents_isMu)") - .Define("nel", "JetConstituentsUtils::count_type(JetsConstituents_isEl)") - .Define("nchargedhad", "JetConstituentsUtils::count_type(JetsConstituents_isChargedHad)") - .Define("nphoton", "JetConstituentsUtils::count_type(JetsConstituents_isGamma)") - .Define("nneutralhad", "JetConstituentsUtils::count_type(JetsConstituents_isNeutralHad)") - - #compute the residues jet-constituents on significant kinematic variables as a check - .Define("tlv_jets", "JetConstituentsUtils::compute_tlv_jets(jets_ee_genkt)") - .Define("sum_tlv_jcs", "JetConstituentsUtils::sum_tlv_constituents(JetsConstituents)") - .Define("de", "JetConstituentsUtils::compute_residue_energy(tlv_jets, sum_tlv_jcs)") - .Define("dpt", "JetConstituentsUtils::compute_residue_pt(tlv_jets, sum_tlv_jcs)") - .Define("dphi", "JetConstituentsUtils::compute_residue_phi(tlv_jets, sum_tlv_jcs)") - .Define("dtheta", "JetConstituentsUtils::compute_residue_theta(tlv_jets, sum_tlv_jcs)") - - #.Define("Invariant_mass", "ROOT::VecOps::InvariantMasses(jets_ee_genkt[0].pt(), jets_ee_genkt[0].rap(), jets_ee_genkt[0].phi(), jets_ee_genkt[0].m(), jets_ee_genkt[1].pt(), jets_ee_genkt[1].rap(), jets_ee_genkt[1].phi(), jets_ee_genkt[1].m())); - .Define("invariant_mass", "JetConstituentsUtils::InvariantMass(tlv_jets[0], tlv_jets[1])") - ) - return df2 + return df + # __________________________________________________________ + # Mandatory: output function, please make sure you return the branchlist as a python list def output(): - branchList = [ - #'RP_px', 'RP_py','RP_pz','RP_e', 'RP_m', 'RP_q', - #'Jets_px', 'Jets_py', 
'Jets_pz', - 'Jets_e', 'Jets_mass', 'Jets_pt', 'Jets_phi', 'Jets_theta', - 'JetsConstituents_e', 'JetsConstituents_pt', 'JetsConstituents_theta', 'JetsConstituents_phi', 'JetsConstituents_charge', - 'JetsConstituents_erel', 'JetsConstituents_erel_log', 'JetsConstituents_thetarel', 'JetsConstituents_phirel', - 'JetsConstituents_dndx', 'JetsConstituents_mtof', - - 'JetsConstituents_d0_wrt0', 'JetsConstituents_z0_wrt0', 'JetsConstituents_phi0_wrt0', 'JetsConstituents_omega_wrt0', 'JetsConstituents_tanlambda_wrt0', - 'Bz', 'JetsConstituents_Bz', - #'JetsConstituents_Par', - 'JetsConstituents_dxy', 'JetsConstituents_dz', 'JetsConstituents_phi0', 'JetsConstituents_C', 'JetsConstituents_ct', + branches_pfcand = list(variables_pfcand.keys()) + branches_jet = list(variables_jet.keys()) + branches_event = list(variables_event.keys()) + + branchList = branches_event + branches_jet + branches_pfcand - 'JetsConstituents_omega_cov', 'JetsConstituents_d0_cov', 'JetsConstituents_z0_cov', 'JetsConstituents_phi0_cov', 'JetsConstituents_tanlambda_cov', - 'JetsConstituents_d0_z0_cov', 'JetsConstituents_phi0_d0_cov', 'JetsConstituents_phi0_z0_cov', - 'JetsConstituents_tanlambda_phi0_cov', 'JetsConstituents_tanlambda_d0_cov', 'JetsConstituents_tanlambda_z0_cov', - 'JetsConstituents_omega_tanlambda_cov', 'JetsConstituents_omega_phi0_cov', 'JetsConstituents_omega_d0_cov', 'JetsConstituents_omega_z0_cov', - 'JetsConstituents_Sip2dVal', 'JetsConstituents_Sip2dSig', - 'JetsConstituents_Sip3dVal', 'JetsConstituents_Sip3dSig', - 'JetsConstituents_JetDistVal', 'JetsConstituents_JetDistSig', - #'JC_Jet0_Pids', - 'JetsConstituents_isMu', - 'JetsConstituents_isEl', - 'JetsConstituents_isChargedHad', - 'JetsConstituents_isGamma', - 'JetsConstituents_isNeutralHad', - 'njet', 'nconst', - 'nmu', 'nel', 'nchargedhad', 'nphoton', 'nneutralhad', - 'de', 'dpt', 'dphi', 'dtheta', - 'invariant_mass' - ] - return branchList + return branchList diff --git a/examples/FCCee/weaver/stage2.cpp b/examples/FCCee/weaver/stage2.cpp deleted file mode 100644 index 17377a2a53..0000000000 --- a/examples/FCCee/weaver/stage2.cpp +++ /dev/null @@ -1,535 +0,0 @@ -//standard library header files -#include -#include -#include -#include -#include - - - -//ROOT header files -#include "TH1F.h" -#include "TCanvas.h" -#include "TFile.h" -#include "TTree.h" -#include "TString.h" -#include "TStyle.h" -#include -#include "ROOT/RVec.hxx" - - - - -int main(int argc, char* argv[]) { - - //usage - if( argc!= 5 ) { - std::cerr << "USAGE: ./to_jetntuple [root_inFileName] [root_outFileName] N_i N_f" << std::endl; - exit(1); - } - - //std::string inDir = ""; - std::string infileName(argv[1]); - //Opening the input file containing the tree (output of fcc analyses_jets_stage1.py) - TFile* infile = TFile::Open(infileName.c_str()); - if( !infile->IsOpen() ){ - std::cerr << "Problems opening root file. Exiting." << std::endl; - exit(-1); - } - std::cout << "-> Opened file " << infileName.c_str() << std::endl; - //Get pointer to tree object - - //std::cout << "infileName lenght: " << infileName.length() << std::endl; - char flavour = infileName[infileName.length()-6]; - std::cout << "flavour: " << flavour << std::endl; - - TTree* ev = (TTree*)infile->Get("events"); - if(!ev) { - std::cerr << "null pointer for TTree! Exiting." 
<< std::endl; - exit(-2); - } - std::cout << "-> Opened tree " << "events" << std::endl; - //variables to be read from the tree - //event properties - ROOT::VecOps::RVec *Jets_e=0; - ROOT::VecOps::RVec *Jets_mass=0; - ROOT::VecOps::RVec *Jets_pt = 0; - ROOT::VecOps::RVec *Jets_phi = 0; - ROOT::VecOps::RVec *Jets_theta = 0; - - int nJets; - //float Bz_i; - - //properties of constituents - - ROOT::VecOps::RVec > *JetsConstituents_e = 0; - ROOT::VecOps::RVec > *JetsConstituents_pt = 0; - ROOT::VecOps::RVec > *JetsConstituents_theta = 0; - ROOT::VecOps::RVec > *JetsConstituents_phi = 0; - ROOT::VecOps::RVec > *JetsConstituents_charge = 0; - - ROOT::VecOps::RVec > *JetsConstituents_erel = 0; - ROOT::VecOps::RVec > *JetsConstituents_erel_log = 0; - ROOT::VecOps::RVec > *JetsConstituents_thetarel = 0; - ROOT::VecOps::RVec > *JetsConstituents_phirel = 0; - - ROOT::VecOps::RVec > *JetsConstituents_dndx = 0; - ROOT::VecOps::RVec > *JetsConstituents_mtof = 0; - - //ROOT::VecOps::RVec > *JetsConstituents_d0_wrt0 = 0; - //ROOT::VecOps::RVec > *JetsConstituents_z0_wrt0 = 0; - //ROOT::VecOps::RVec > *JetsConstituents_phi0_wrt0 = 0; - //ROOT::VecOps::RVec > *JetsConstituents_omega_wrt0 = 0; - //ROOT::VecOps::RVec > *JetsConstituents_tanlambda_wrt0 = 0; - - ROOT::VecOps::RVec > *JetsConstituents_dxy = 0; - ROOT::VecOps::RVec > *JetsConstituents_dz = 0; - //ROOT::VecOps::RVec > *JetsConstituents_phi0 = 0; - //ROOT::VecOps::RVec > *JetsConstituents_C = 0; - //ROOT::VecOps::RVec > *JetsConstituents_ct = 0; - - ROOT::VecOps::RVec > *JetsConstituents_omega_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_d0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_z0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_phi0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_tanlambda_cov = 0; - - ROOT::VecOps::RVec > *JetsConstituents_d0_z0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_phi0_d0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_phi0_z0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_tanlambda_phi0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_tanlambda_d0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_tanlambda_z0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_omega_tanlambda_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_omega_phi0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_omega_d0_cov = 0; - ROOT::VecOps::RVec > *JetsConstituents_omega_z0_cov = 0; - - ROOT::VecOps::RVec > *JetsConstituents_Sip2dVal = 0; - ROOT::VecOps::RVec > *JetsConstituents_Sip2dSig = 0; - ROOT::VecOps::RVec > *JetsConstituents_Sip3dVal = 0; - ROOT::VecOps::RVec > *JetsConstituents_Sip3dSig = 0; - ROOT::VecOps::RVec > *JetsConstituents_JetDistVal = 0; - ROOT::VecOps::RVec > *JetsConstituents_JetDistSig = 0; - - ROOT::VecOps::RVec > *JetsConstituents_isMu = 0; - ROOT::VecOps::RVec > *JetsConstituents_isEl = 0; - ROOT::VecOps::RVec > *JetsConstituents_isChargedHad = 0; - ROOT::VecOps::RVec > *JetsConstituents_isGamma = 0; - ROOT::VecOps::RVec > *JetsConstituents_isNeutralHad = 0; - - ROOT::VecOps::RVec* count_Const = 0; - ROOT::VecOps::RVec* count_Mu = 0; - ROOT::VecOps::RVec* count_El = 0; - ROOT::VecOps::RVec* count_ChargedHad = 0; - ROOT::VecOps::RVec* count_Photon = 0; - ROOT::VecOps::RVec* count_NeutralHad = 0; - - //Set the info for each branch of the tree to correspond to our data - - ev->SetBranchAddress("Jets_e", &Jets_e); - ev->SetBranchAddress("Jets_mass", &Jets_mass); - ev->SetBranchAddress("Jets_pt", &Jets_pt); - ev->SetBranchAddress("Jets_phi", &Jets_phi); - ev->SetBranchAddress("Jets_theta", 
&Jets_theta); - - ev->SetBranchAddress("JetsConstituents_e", &JetsConstituents_e); - ev->SetBranchAddress("JetsConstituents_pt", &JetsConstituents_pt); - ev->SetBranchAddress("JetsConstituents_theta", &JetsConstituents_theta); - ev->SetBranchAddress("JetsConstituents_phi", &JetsConstituents_phi); - ev->SetBranchAddress("JetsConstituents_charge", &JetsConstituents_charge); - - ev->SetBranchAddress("JetsConstituents_erel", &JetsConstituents_erel); - ev->SetBranchAddress("JetsConstituents_erel_log", &JetsConstituents_erel_log); - ev->SetBranchAddress("JetsConstituents_thetarel", &JetsConstituents_thetarel); - ev->SetBranchAddress("JetsConstituents_phirel", &JetsConstituents_phirel); - - ev->SetBranchAddress("JetsConstituents_dndx", &JetsConstituents_dndx); - ev->SetBranchAddress("JetsConstituents_mtof", &JetsConstituents_mtof); - - //ev->SetBranchAddress("JetsConstituents_d0_wrt0", &JetsConstituents_d0_wrt0); - //ev->SetBranchAddress("JetsConstituents_z0_wrt0", &JetsConstituents_z0_wrt0); - //ev->SetBranchAddress("JetsConstituents_phi0_wrt0", &JetsConstituents_phi0_wrt0); - //ev->SetBranchAddress("JetsConstituents_omega_wrt0", &JetsConstituents_omega_wrt0); - //ev->SetBranchAddress("JetsConstituents_tanlambda_wrt0", &JetsConstituents_tanlambda_wrt0); - - ev->SetBranchAddress("JetsConstituents_dxy", &JetsConstituents_dxy); - ev->SetBranchAddress("JetsConstituents_dz", &JetsConstituents_dz); - //ev->SetBranchAddress("JetsConstituents_phi0", &JetsConstituents_phi0); - //ev->SetBranchAddress("JetsConstituents_C", &JetsConstituents_C); - //ev->SetBranchAddress("JetsConstituents_ct", &JetsConstituents_ct); - - ev->SetBranchAddress("JetsConstituents_omega_cov", &JetsConstituents_omega_cov); - ev->SetBranchAddress("JetsConstituents_d0_cov", &JetsConstituents_d0_cov); - ev->SetBranchAddress("JetsConstituents_z0_cov", &JetsConstituents_z0_cov); - ev->SetBranchAddress("JetsConstituents_phi0_cov", &JetsConstituents_phi0_cov); - ev->SetBranchAddress("JetsConstituents_tanlambda_cov", &JetsConstituents_tanlambda_cov); - - ev->SetBranchAddress("JetsConstituents_d0_z0_cov", &JetsConstituents_d0_z0_cov); - ev->SetBranchAddress("JetsConstituents_phi0_d0_cov", &JetsConstituents_phi0_d0_cov); - ev->SetBranchAddress("JetsConstituents_phi0_z0_cov", &JetsConstituents_phi0_z0_cov); - ev->SetBranchAddress("JetsConstituents_tanlambda_phi0_cov", &JetsConstituents_tanlambda_phi0_cov); - ev->SetBranchAddress("JetsConstituents_tanlambda_d0_cov", &JetsConstituents_tanlambda_d0_cov); - ev->SetBranchAddress("JetsConstituents_tanlambda_z0_cov", &JetsConstituents_tanlambda_z0_cov); - ev->SetBranchAddress("JetsConstituents_omega_phi0_cov", &JetsConstituents_omega_phi0_cov); - ev->SetBranchAddress("JetsConstituents_omega_d0_cov", &JetsConstituents_omega_d0_cov); - ev->SetBranchAddress("JetsConstituents_omega_z0_cov", &JetsConstituents_omega_z0_cov); - ev->SetBranchAddress("JetsConstituents_omega_tanlambda_cov", &JetsConstituents_omega_tanlambda_cov); - - ev->SetBranchAddress("JetsConstituents_Sip2dVal", &JetsConstituents_Sip2dVal); - ev->SetBranchAddress("JetsConstituents_Sip2dSig", &JetsConstituents_Sip2dSig); - ev->SetBranchAddress("JetsConstituents_Sip3dVal", &JetsConstituents_Sip3dVal); - ev->SetBranchAddress("JetsConstituents_Sip3dSig", &JetsConstituents_Sip3dSig); - ev->SetBranchAddress("JetsConstituents_JetDistVal", &JetsConstituents_JetDistVal); - ev->SetBranchAddress("JetsConstituents_JetDistSig", &JetsConstituents_JetDistSig); - - ev->SetBranchAddress("JetsConstituents_isMu", &JetsConstituents_isMu); - 
ev->SetBranchAddress("JetsConstituents_isEl", &JetsConstituents_isEl); - ev->SetBranchAddress("JetsConstituents_isChargedHad", &JetsConstituents_isChargedHad); - ev->SetBranchAddress("JetsConstituents_isGamma", &JetsConstituents_isGamma); - ev->SetBranchAddress("JetsConstituents_isNeutralHad", &JetsConstituents_isNeutralHad); - - //ev->SetBranchAddress("Bz", &Bz_i); - - ev->SetBranchAddress("njet", &nJets); - ev->SetBranchAddress("nconst", &count_Const); - ev->SetBranchAddress("nmu", &count_Mu); - ev->SetBranchAddress("nel", &count_El); - ev->SetBranchAddress("nchargedhad", &count_ChargedHad); - ev->SetBranchAddress("nphoton", &count_Photon); - ev->SetBranchAddress("nneutralhad", &count_NeutralHad); - - - //we defined how we read the tree. Now we - //need to define how we write the ntuple. - - //std::string outDir(""); - std::string outfileName(argv[2]); - TFile* outfile = new TFile(outfileName.c_str(),"recreate"); - std::cout << "-> Opened outfile " << std::endl; - TTree* ntuple = new TTree("tree", "jets_Ntuple"); - std::cout << "-> Opened ntuple " << std::endl; - - //variables to write - //jet - double recojet_e, recojet_mass, recojet_pt, recojet_phi, recojet_theta; - - //constituents - float pfcand_e[1000] = {0.}; - float pfcand_pt[1000] = {0.}; - float pfcand_charge[1000] = {0.}; - float pfcand_theta[1000] = {0.}; - float pfcand_phi[1000] = {0.}; - - float pfcand_erel[1000] = {0.}; - float pfcand_erel_log[1000] = {0.}; - float pfcand_thetarel[1000] = {0.}; - float pfcand_phirel[1000] = {0.}; - - float pfcand_dndx[1000] = {0.}; - float pfcand_mtof[1000] = {0.}; - - float pfcand_dxy[1000] = {0.}; - float pfcand_dz[1000] = {0.}; - - float pfcand_dptdpt[1000] = {0.}; - float pfcand_dxydxy[1000] = {0.}; - float pfcand_dzdz[1000] = {0.}; - float pfcand_dphidphi[1000] = {0.}; - float pfcand_detadeta[1000] = {0.}; - - float pfcand_dxydz[1000] = {0.}; - float pfcand_dphidxy[1000] = {0.}; - float pfcand_phidz[1000] = {0.}; - float pfcand_phictgtheta[1000] = {0.}; - float pfcand_dxyctgtheta[1000] = {0.}; - float pfcand_dlambdadz[1000] = {0.}; - float pfcand_cctgtheta[1000] = {0.}; - float pfcand_phic[1000] = {0.}; - float pfcand_dxyc[1000] = {0.}; - float pfcand_cdz[1000] = {0.}; - - float pfcand_btagSip2dVal[1000] = {0.}; - float pfcand_btagSip2dSig[1000] = {0.}; - float pfcand_btagSip3dVal[1000] = {0.}; - float pfcand_btagSip3dSig[1000] = {0.}; - float pfcand_btagJetDistVal[1000] = {0.}; - float pfcand_btagJetDistSig[1000] = {0.}; - - float pfcand_isMu[1000] = {0.}; - float pfcand_isEl[1000] ={0.}; - float pfcand_isChargedHad[1000] ={0.}; - float pfcand_isGamma[1000] ={0.}; - float pfcand_isNeutralHad[1000] ={0.}; - - //counting species - int njet = 0; - int nconst = 0; //number of constituents of the jets - int anomaly_njets_counts_less = 0; - int anomaly_njets_counts_more = 0; - int anomaly_njets_counts = 0; - int saved_events_counts = 0; //this variable will be used to count the number of events actually saved - //int Nevents_Max = 1000000; // maximum number of events to be saved - int nphotons = 0; - int ncharged = 0; - int nchargedhad = 0; - int nneutralhad = 0; - int nel = 0; - int nmu = 0; - - - //set flags - float is_q = 0.; - float is_b = 0.; - float is_c = 0.; - float is_s = 0.; - float is_g = 0.; - float is_t = 0.; - - if (flavour == 'q') {is_q = 1.;} - if (flavour == 'b') {is_b = 1.;} - if (flavour == 'c') {is_c = 1.;} - if (flavour == 's') {is_s = 1.;} - if (flavour == 'g') {is_g = 1.;} - if (flavour == 't') {is_t = 1.;} - - std::cout << "is_q: " << is_q << std::endl; - - // In an 
n-tuple, we assign each variable to its own branch. - ntuple->Branch("recojet_e", &recojet_e); - ntuple->Branch("recojet_mass", &recojet_mass); - ntuple->Branch("recojet_pt", &recojet_pt); - ntuple->Branch("recojet_phi", &recojet_phi); - ntuple->Branch("recojet_theta", &recojet_theta); - - - ntuple->Branch("recojet_isQ", &is_q); - ntuple->Branch("recojet_isB", &is_b); - ntuple->Branch("recojet_isC", &is_c); - ntuple->Branch("recojet_isS", &is_s); - ntuple->Branch("recojet_isG", &is_g); - ntuple->Branch("recojet_isT", &is_t); - - ntuple->Branch("nconst", &nconst, "nconst/I"); - ntuple->Branch("nphotons", &nphotons, "nphotons/I"); - ntuple->Branch("ncharged", &ncharged, "ncharged/I"); - ntuple->Branch("nneutralhad", &nneutralhad, "nneutralhad/I"); - ntuple->Branch("nchargedhad", &nchargedhad, "nchargedhad/I"); - ntuple->Branch("nel", &nel, "nel/I"); - ntuple->Branch("nmu", &nmu, "nmu/I"); - - ntuple->Branch("pfcand_e", pfcand_e, "pfcand_e[nconst]/F"); - ntuple->Branch("pfcand_pt", pfcand_pt, "pfcand_pt[nconst]/F"); - ntuple->Branch("pfcand_charge", pfcand_charge, "pfcand_charge[nconst]/F"); - ntuple->Branch("pfcand_theta", pfcand_theta, "pfcand_theta[nconst]/F"); - ntuple->Branch("pfcand_phi", pfcand_phi, "pfcand_phi[nconst]/F"); - - ntuple->Branch("pfcand_erel", pfcand_erel, "pfcand_erel[nconst]/F"); - ntuple->Branch("pfcand_erel_log", pfcand_erel_log, "pfcand_erel_log[nconst]/F"); - ntuple->Branch("pfcand_thetarel", pfcand_thetarel, "pfcand_thetarel[nconst]/F"); - ntuple->Branch("pfcand_phirel", pfcand_phirel, "pfcand_phirel[nconst]/F"); - - ntuple->Branch("pfcand_dndx", pfcand_dndx, "pfcand_dndx[nconst]/F"); - ntuple->Branch("pfcand_mtof", pfcand_mtof, "pfcand_mtof[nconst]/F"); - - ntuple->Branch("pfcand_dxy", pfcand_dxy, "pfcand_dxy[nconst]/F"); - ntuple->Branch("pfcand_dz", pfcand_dz, "pfcand_dz[nconst]/F"); - - ntuple->Branch("pfcand_dptdpt", pfcand_dptdpt, "pfcand_dptdpt[nconst]/F"); - ntuple->Branch("pfcand_dxydxy", pfcand_dxydxy, "pfcand_dxydxy[nconst]/F"); - ntuple->Branch("pfcand_dzdz", pfcand_dzdz, "pfcand_dzdz[nconst]/F"); - ntuple->Branch("pfcand_dphidphi", pfcand_dphidphi, "pfcand_dphidphi[nconst]/F"); - ntuple->Branch("pfcand_detadeta", pfcand_detadeta, "pfcand_detadeta[nconst]/F"); - - ntuple->Branch("pfcand_dxydz", pfcand_dxydz, "pfcand_dxydz[nconst]/F"); - ntuple->Branch("pfcand_dphidxy", pfcand_dphidxy, "pfcand_dphidxy[nconst]/F"); - ntuple->Branch("pfcand_phidz", pfcand_phidz, "pfcand_phidz[nconst]/F"); - ntuple->Branch("pfcand_phictgtheta", pfcand_phictgtheta, "pfcand_phictgtheta[nconst]/F"); - ntuple->Branch("pfcand_dxyctgtheta", pfcand_dxyctgtheta, "pfcand_dxyctgtheta[nconst]/F"); - ntuple->Branch("pfcand_dlambdadz", pfcand_dlambdadz, "pfcand_dlambdadz[nconst]/F"); - ntuple->Branch("pfcand_cctgtheta", pfcand_cctgtheta, "pfcand_cctgtheta[nconst]/F"); - ntuple->Branch("pfcand_phic", pfcand_phic, "pfcand_phic[nconst]/F"); - ntuple->Branch("pfcand_dxyc", pfcand_dxyc, "pfcand_dxyc[nconst]/F"); - ntuple->Branch("pfcand_cdz", pfcand_cdz, "pfcand_cdz[nconst]/F"); - - ntuple->Branch("pfcand_btagSip2dVal", pfcand_btagSip2dVal, "pfcand_btagSip2dVal[nconst]/F"); - ntuple->Branch("pfcand_btagSip2dSig", pfcand_btagSip2dSig, "pfcand_btagSip2dSig[nconst]/F"); - ntuple->Branch("pfcand_btagSip3dVal", pfcand_btagSip3dVal, "pfcand_btagSip3dVal[nconst]/F"); - ntuple->Branch("pfcand_btagSip3dSig", pfcand_btagSip3dSig, "pfcand_btagSip3dSig[nconst]/F"); - ntuple->Branch("pfcand_btagJetDistVal", pfcand_btagJetDistVal, "pfcand_btagJetDistVal[nconst]/F"); - 
ntuple->Branch("pfcand_btagJetDistSig", pfcand_btagJetDistSig, "pfcand_btagJetDistSig[nconst]/F"); - - ntuple->Branch("pfcand_isMu", pfcand_isMu, "pfcand_isMu[nconst]/F"); - ntuple->Branch("pfcand_isEl", pfcand_isEl, "pfcand_isEl[nconst]/F"); - ntuple->Branch("pfcand_isChargedHad", pfcand_isChargedHad, "pfcand_isChargedHad[nconst]/F"); - ntuple->Branch("pfcand_isGamma", pfcand_isGamma, "pfcand_isGamma[nconst]/F"); - ntuple->Branch("pfcand_isNeutralHad", pfcand_isNeutralHad, "pfcand_isNeutralHad[nconst]/F"); - - int N_i = atoi(argv[3]); - int N_f = atoi(argv[4]); - int Nevents_Max = N_f - N_i; // maximum number of events to be saved - - //Run over each entry in the tree: - int nentries = ev->GetEntries(); //total number of events in the file - - std::cout<< " " << std::endl; - std::cout << "-> number of events contained in the tree: " << nentries <GetEntry(i); - - //njet = (*Jets_e).size(); - njet = nJets; - - if(i % 10000 == 0) { - std::cout<< "-----" << std::endl; - std::cout << "-> event: " << i << " - " << "#jets: " << njet << " - (*Jets_e).size(): " << (*Jets_e).size() << std::endl; - std::cout << "-----" << std::endl; - } - - if(njet != 2) { - anomaly_njets_counts += 1; - if (njet > 2) { - anomaly_njets_counts_more += 1; - } else { - anomaly_njets_counts_less += 1; - } - } - - if (njet < 2) { //exclude the events with less than two jets - continue ; - } - - //run over the jets in the event - for(int j=0; j < 2; ++j) { //we only take the two leadingjets - - recojet_e = (*Jets_e)[j]; - //jet_e = jets_e->at(j); - recojet_mass = (*Jets_mass)[j]; - recojet_pt = (*Jets_pt)[j]; - recojet_phi = (*Jets_phi)[j]; - recojet_theta = (*Jets_theta)[j]; - //n_constituents = (*jets_ends)[j] - (*jets_begins)[j]; - //nconst = (JetsConstituents_e->at(j)).size(); - nconst = (count_Const->at(j)); - nel = (count_El->at(j)); - nmu = (count_Mu->at(j)); - nchargedhad = (count_ChargedHad->at(j)); - nphotons = (count_Photon->at(j)); - nneutralhad = (count_NeutralHad->at(j)); - - - if(i % 10000 == 0) { - std::cout << "-> jet: " << j << " - " << "nconst: " << nconst << "-> (JetsConstituents_e->at(j)).size(): " << (JetsConstituents_e->at(j)).size() << std::endl; - std::cout << "-> jet_e: " << recojet_e << " - jet_pt: " << recojet_pt << std::endl; - std::cout<< "-----" << std::endl; - } - - for(int k = 0; k < nconst; ++k){ - pfcand_e[k] = (JetsConstituents_e->at(j))[k]; - //std::cout << k << ' ' << JC_e[k] << std::endl; - pfcand_pt[k] = (JetsConstituents_pt->at(j))[k]; - pfcand_theta[k] = (JetsConstituents_theta->at(j))[k]; - pfcand_phi[k] = (JetsConstituents_phi->at(j))[k]; - pfcand_charge[k] = (JetsConstituents_charge->at(j))[k]; - - pfcand_erel[k] = (JetsConstituents_erel->at(j))[k]; - pfcand_erel_log[k] = (JetsConstituents_erel_log->at(j))[k]; - pfcand_thetarel[k] = (JetsConstituents_thetarel->at(j))[k]; - pfcand_phirel[k] = (JetsConstituents_phirel->at(j))[k]; - - pfcand_dndx[k] = (JetsConstituents_dndx->at(j))[k]/1000.; //transformed in mm - pfcand_mtof[k] = (JetsConstituents_mtof->at(j))[k]; - - pfcand_dxy[k] = (JetsConstituents_dxy->at(j))[k]; - //pfcand_dz[k] = (JetsConstituents_dz->at(j))[k]; - //std::cout<at(j))[k]<at(j))[k])) pfcand_dz[k] = -9; - else pfcand_dz[k] = (JetsConstituents_dz->at(j))[k]; - pfcand_dptdpt[k] = (JetsConstituents_omega_cov->at(j))[k]; - pfcand_dxydxy[k] = (JetsConstituents_d0_cov->at(j))[k]; - pfcand_dzdz[k] = (JetsConstituents_z0_cov->at(j))[k]; - pfcand_dphidphi[k] = (JetsConstituents_phi0_cov->at(j))[k]; - pfcand_detadeta[k] = (JetsConstituents_tanlambda_cov->at(j))[k]; - - 
pfcand_dxydz[k] = (JetsConstituents_d0_z0_cov->at(j))[k]; - pfcand_dphidxy[k] = (JetsConstituents_phi0_d0_cov->at(j))[k]; //***** - pfcand_phidz[k] = (JetsConstituents_phi0_z0_cov->at(j))[k]; - - pfcand_phictgtheta[k] = (JetsConstituents_tanlambda_phi0_cov->at(j))[k]; - pfcand_dxyctgtheta[k] = (JetsConstituents_tanlambda_d0_cov->at(j))[k]; - pfcand_dlambdadz[k] = (JetsConstituents_tanlambda_z0_cov->at(j))[k]; - - pfcand_cctgtheta[k] = (JetsConstituents_omega_tanlambda_cov->at(j))[k]; - pfcand_phic[k] = (JetsConstituents_omega_phi0_cov->at(j))[k]; - pfcand_dxyc[k] = (JetsConstituents_omega_d0_cov->at(j))[k]; - pfcand_cdz[k] = (JetsConstituents_omega_z0_cov->at(j))[k]; - - pfcand_btagSip2dVal[k] = (JetsConstituents_Sip2dVal->at(j))[k]; - pfcand_btagSip2dSig[k] = (JetsConstituents_Sip2dSig->at(j))[k]; - //pfcand_btagSip3dVal[k] = (JetsConstituents_Sip3dVal->at(j))[k]; - //pfcand_btagSip3dSig[k] = (JetsConstituents_Sip3dSig->at(j))[k]; - //pfcand_btagJetDistVal[k] = (JetsConstituents_JetDistVal->at(j))[k]; - //pfcand_btagJetDistSig[k] = (JetsConstituents_JetDistSig->at(j))[k]; - - if (isnan((JetsConstituents_Sip3dVal->at(j))[k]))pfcand_btagSip3dVal[k] = -9; - else pfcand_btagSip3dVal[k] = JetsConstituents_Sip3dVal->at(j)[k]; - if (isnan((JetsConstituents_Sip3dSig->at(j))[k]))pfcand_btagSip3dSig[k] = -9; - else pfcand_btagSip3dSig[k] = JetsConstituents_Sip3dSig->at(j)[k]; - if (isnan((JetsConstituents_JetDistVal->at(j))[k]))pfcand_btagJetDistVal[k] = -9; - else pfcand_btagJetDistVal[k] = JetsConstituents_JetDistVal->at(j)[k]; - if (isnan((JetsConstituents_JetDistSig->at(j))[k]))pfcand_btagJetDistSig[k] = -9; - else pfcand_btagJetDistSig[k] = JetsConstituents_JetDistSig->at(j)[k]; - - pfcand_isMu[k] = (JetsConstituents_isMu->at(j))[k]; - pfcand_isEl[k] = (JetsConstituents_isEl->at(j))[k]; - pfcand_isChargedHad[k] = (JetsConstituents_isChargedHad->at(j))[k]; - pfcand_isGamma[k] = (JetsConstituents_isGamma->at(j))[k]; - pfcand_isNeutralHad[k] = (JetsConstituents_isNeutralHad->at(j))[k]; - - //std::cout << "k: "<< k << std::endl; - //counting species - /*if ( (JetsConstituents_isMu->at(j))[k] == 1 ) { - nmu += 1; - ncharged += 1; - } else if ((JetsConstituents_isEl->at(j))[k] == 1) { - nel += 1; - ncharged += 1; - } else if ( (JetsConstituents_isGamma->at(j))[k] == 1 ) { - nphotons += 1; - } else if ((JetsConstituents_isChargedHad->at(j))[k] == 1) { - nchargedhad += 1; - ncharged += 1; - } else if ((JetsConstituents_isNeutralHad->at(j))[k] == 1) { - nneutralhad += 1; - }*/ - } - ntuple->Fill(); - } - saved_events_counts += 1; //we count the num of events saved - if (saved_events_counts == Nevents_Max) { //interrupt the loop if Nevents_max events have already been saved - break; - } - } - - std::cout << "-> number of entries run: " << nentries < number of entries considered: " << saved_events_counts < number of events with njets != 2: " << anomaly_njets_counts << std::endl; - std::cout << "--> number of events with njets > 2: " << anomaly_njets_counts_more << std::endl; - std::cout << "--> number of events with njets < 2: " << anomaly_njets_counts_less << std::endl; - - - //outfile->cd(); - //outfile->mkdir("deepntuplizer/"); - //outfile->cd("deepntuplizer/"); - //ntuple->SetDirectory(gDirectory); - ntuple->Write(); - infile->Close(); - outfile->Close(); - std::cout << "-> Closed files "< n_start + numberOfEntries: + print("ERROR: requesting too many events. 
This file only has {}".format(numberOfEntries)) + sys.exit() + +branches_pfcand = list(variables_pfcand.keys()) +branches_jet = list(variables_jet.keys()) + +if len(branches_jet) == 0: + print("ERROR: branches_jet is empty ...") + sys.exit() + +if len(branches_pfcand) == 0: + print("ERROR: branches_pfcand is empty ...") + sys.exit() + +# print("") +# print("-> number of events: {}".format(numberOfEntries)) +# print("-> requested to run over [{},{}] event range".format(n_start, n_final)) + +# branches_pfcand = [branches_pfcand[0]] +# branches_jet = [branches_jet[-1]] + +## define variables for output tree +maxn = 500 + +match_flavor = dict() +for f in flavors: + match_flavor[f] = False + if "H{}{}".format(f, f) in input_file: + match_flavor[f] = True + +if True in match_flavor.values(): + f0 = list(match_flavor.keys())[list(match_flavor.values()).index(True)] + # print("producing '{}-flavor' jets ...".format(f0, f0)) +else: + print("ERROR: could not infer jet flavor from file name") + str_err = "ERROR: please provide input file containing: " + for f in flavors: + str_err += "H{}{} ".format(f, f) + +## output jet-wise tree +out_root = TFile(output_file, "RECREATE") +t = TTree("tree", "tree with jets") + +jet_array = dict() +for f in flavors: + b = "recojet_is{}".format(f.upper()) + jet_array[b] = array("i", [0]) + t.Branch(b, jet_array[b], "{}/I".format(b)) +for b in branches_jet: + jet_array[b] = array("f", [0]) + t.Branch(b, jet_array[b], "{}/F".format(b)) + +## need this branch to define pfcand branches +jet_npfcand = array("i", [0]) +t.Branch("jet_npfcand", jet_npfcand, "jet_npfcand/I") + +pfcand_array = dict() +for b in branches_pfcand: + pfcand_array[b] = array("f", maxn * [0]) + t.Branch(b, pfcand_array[b], "{}[jet_npfcand]/F".format(b)) + +if debug: + for key, item in jet_array.items(): + print(key) + for key, item in pfcand_array.items(): + print(key) + +# Loop over all events +for entry in range(n_start, n_final): + # Load selected branches with data from specified event + + # if (entry+1)%100 == 0: + # if (entry + 1) % 1000 == 0: + # print(" ... 
processed {} events ...".format(entry + 1)) + + ev.GetEntry(entry) + + njets = len(getattr(ev, branches_jet[0])) + + ## loop over jets + for j in range(njets): + + ## fill jet-based quantities + for f in flavors: + name = "recojet_is{}".format(f.upper()) + jet_array[name][0] = int(match_flavor[f]) + if debug: + print(" jet:", j, name, jet_array[name][0]) + + for name in branches_jet: + jet_array[name][0] = getattr(ev, name)[j] + if debug: + print(" jet:", j, name, getattr(ev, name)[j]) + + ## loop over constituents + jet_npfcand[0] = len(getattr(ev, branches_pfcand[0])[j]) + for k in range(jet_npfcand[0]): + for name in branches_pfcand: + pfcand_array[name][k] = getattr(ev, name)[j][k] + if debug: + print(" const:", k, name, getattr(ev, name)[j][k]) + + ## fill tree at every jet + t.Fill() + +# write tree +t.SetDirectory(out_root) +t.Write() diff --git a/examples/FCCee/weaver/stage_all.py b/examples/FCCee/weaver/stage_all.py new file mode 100644 index 0000000000..209274f811 --- /dev/null +++ b/examples/FCCee/weaver/stage_all.py @@ -0,0 +1,160 @@ +import sys +import os +import argparse +import subprocess +from subprocess import Popen, PIPE +from datetime import date +import time +import concurrent.futures + +# ________________________________________________________________________________ +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--indir", + help="path input directory", + default="/eos/experiment/fcc/ee/generation/DelphesEvents/winter2023_training/IDEA/", + ) + parser.add_argument( + "--outdir", + help="path output directory", + default="/eos/experiment/fcc/ee/jet_flavour_tagging/winter2023/samples_gen_v1", + ) + + parser.add_argument("--sample", help="sample name", default="wzp6_ee_nunuH_HXX_ecm240") + parser.add_argument("--ncpus", help="number of cpus", type=int, default=64) + parser.add_argument("--opt", help="option 1: run stage 1, 2: run stage 2, 3: all 4: clean", default="3") + + args = parser.parse_args() + indir = args.indir + outdir = args.outdir + ncpus = args.ncpus + sample = args.sample + opt = args.opt + + ## qq is merge of uu/dd + flavors = ["bb", "cc", "ss", "gg", "qq", "tautau"] + + outtmpdir = "/tmp/selvaggi/data/stage_all" + os.system("rm -rf {}".format(outtmpdir)) + os.system("mkdir -p {}".format(outtmpdir)) + os.system("mkdir -p {}".format(outdir)) + + ## fill name of stage1 files + stage1_files = dict() + for f in flavors: + stage1_files[f] = "{}/stage1_H{}.root".format(outtmpdir, f) + + edm_files = "" + + for f in flavors: + ### run stage 1 + if opt == "1" or opt == "3": + + sample_f = sample.replace("XX", f) + edm_files = "{}/{}/*.root".format(indir, sample_f) + cmd_stage1 = ( + "fccanalysis run examples/FCCee/weaver/stage1_gen.py --output {} --files-list {} --ncpus {}".format( + stage1_files[f], edm_files, ncpus + ) + ) + print("running stage 1: ") + print("") + print("{}".format(cmd_stage1)) + print("") + os.system(cmd_stage1) + + ### run stage 2 + if opt == "2" or opt == "3": + + nevents = count_events(stage1_files[f]) + nevents_per_thread = int(nevents / ncpus) + + commands_stage2 = [] + stage2_files = dict() + + stage2_final_file = "{}/stage2_H{}.root".format(outtmpdir, f) + stage2_wild_files = "{}/stage2_H{}_*.root".format(outtmpdir, f) + hadd_cmd = "hadd -f {} {}".format(stage2_final_file, stage2_wild_files) + + for i in range(ncpus): + + stage2_files[i] = "{}/stage2_H{}_{}.root".format(outtmpdir, f, i) + nstart = i * nevents_per_thread + nend = nstart + nevents_per_thread + + cmd_stage2 = "python 
examples/FCCee/weaver/stage2.py {} {} {}".format(
+                    stage1_files[f], stage2_files[i], nstart, nend
+                )
+
+                commands_stage2.append(cmd_stage2)
+
+            # Create a thread pool executor with ncpus worker threads
+
+            executor = concurrent.futures.ThreadPoolExecutor(max_workers=ncpus)
+
+            # Submit each command to the executor
+            future_to_command = {executor.submit(run_command, command): command for command in commands_stage2}
+
+            # Wait for all threads to finish
+            concurrent.futures.wait(future_to_command)
+
+            # Run the final command
+            print("")
+            print("now collect and hadd stage 2 files into: {}".format(stage2_final_file))
+            print(hadd_cmd)
+            os.system(hadd_cmd)
+            print("")
+            print("copy file to final destination...")
+            os.system("cp {} {}".format(stage2_final_file, outdir))
+            print("file {} copied".format(outdir))
+            print("cleaning up tmp files...")
+            os.system("rm -rf {} {} {}".format(stage2_final_file, stage1_files[f], stage2_wild_files))
+            print("done.")
+
+
+# ________________________________________________________________________________
+
+
+def run_command(command):
+    # Run the given command in a shell
+    print("running command: {}".format(command))
+    os.system(command)
+
+
+# ________________________________________________________________________________
+def count_events(file, tree_name="events"):
+    import ROOT
+
+    # Open the ROOT file
+    root_file = ROOT.TFile.Open(file)
+
+    # Get the tree from the file
+    tree = root_file.Get(tree_name)
+
+    # Get the number of events in the tree
+    num_events = tree.GetEntries()
+
+    return num_events
+
+
+# _______________________________________________________________________________________
+if __name__ == "__main__":
+    main()
diff --git a/examples/FCCee/weaver/stage_plots.py b/examples/FCCee/weaver/stage_plots.py
new file mode 100644
index 0000000000..14d3672dba
--- /dev/null
+++ b/examples/FCCee/weaver/stage_plots.py
@@ -0,0 +1,266 @@
+import ROOT
+import os
+import argparse
+
+# ________________________________________________________________________________
+def main():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument(
+        "--indir",
+        help="path input directory",
+        default="/tmp/selvaggi/data/pre_winter2023_tests_v2/selvaggi_2022Nov24",
+    )
+    parser.add_argument(
+        "--outdir",
+        help="path output directory",
+        default="/eos/user/s/selvaggi/www/test_tag",
+    )
+
+    args = parser.parse_args()
+
+    # Enable multi-threading
+    ROOT.ROOT.EnableImplicitMT()
+
+    from examples.FCCee.weaver.config import variables_pfcand, variables_jet, variables_event, flavors
+
+    input_dir = args.indir
+    output_dir = args.outdir
+
+    os.system("mkdir -p {}".format(output_dir))
+
+    for f in flavors:
+
+        sample_a = {
+            "file": "{}/ntuple_test_wzp6_ee_nunuH_H{}{}.root".format(input_dir, f, f),
+            "flavor": f,
+            "label": "WZ + Pythia6",
+        }
+        sample_b = {
+            "file": "{}/ntuple_test_p8_ee_ZH_Znunu_H{}{}.root".format(input_dir, f, f),
+            "flavor": f,
+            "label": "Pythia8",
+        }
+        # We read the tree from the file and create an RDataFrame.
+ df_a = ROOT.RDataFrame("tree", sample_a["file"]) + df_b = ROOT.RDataFrame("tree", sample_b["file"]) + + print(sample_a["file"]) + print(sample_b["file"]) + + sample_a["histos_pfcand"] = dfhs_pfcand(df_a, variables_pfcand) + sample_b["histos_pfcand"] = dfhs_pfcand(df_b, variables_pfcand) + + sample_a["histos_jet"] = dfhs_jet(df_a, variables_jet) + sample_b["histos_jet"] = dfhs_jet(df_b, variables_jet) + + #sample_a["histos_event"] = dfhs_event(df_a, variables_event) + #sample_b["histos_event"] = dfhs_event(df_b, variables_event) + + # RunGraphs allows to run the event loops of the separate RDataFrame graphs + # concurrently. This results in an improved usage of the available resources + # if each separate RDataFrame can not utilize all available resources, e.g., + ROOT.RDF.RunGraphs( + list(sample_a["histos_pfcand"].values()) + + list(sample_b["histos_pfcand"].values()) + + list(sample_a["histos_jet"].values()) + + list(sample_b["histos_jet"].values()) + #+ list(sample_a["histos_event"].values()) + #+ list(sample_b["histos_event"].values()) + ) + + + for var, params in variables_pfcand.items(): + plot(sample_a, sample_b, "histos_pfcand", var, params, output_dir) + for var, params in variables_jet.items(): + plot(sample_a, sample_b, "histos_jet", var, params, output_dir) + + """ + for var, params in variables_event.items(): + plot(sample_a, sample_b, "histos_event", var, params, output_dir) + """ + +# _______________________________________________________________________________ +def dfhs_pfcand(df, vars): + + ## extract charged particles + # df_charged = df.Filter("All(abs(pfcand_charge)>0)", "select charged constituents") + df_charged = df + + ## order constituents in energy + df_sorted_e = df_charged.Define("e_sorted_id", "Reverse(Argsort(pfcand_e))") + + df_dict = dict() + + for pfcand_var, params in vars.items(): + df_var = df_sorted_e.Redefine(pfcand_var, "Take({}, e_sorted_id)".format(pfcand_var)) + var = pfcand_var.replace("pfcand_", "") + df_var = df_var.Define(var, "{}[0]".format(pfcand_var)) + df_dict[pfcand_var] = df_var.Histo1D( + ( + "h_{}".format(var), + ";{};N_{{Events}}".format(params["title"]), + params["bin"], + params["xmin"], + params["xmax"], + ), + var, + ) + return df_dict + + +# _______________________________________________________________________________ +def dfhs_jet(df, vars): + + ## extract charged particles + # df_charged = df.Filter("All(abs(pfcand_charge)>0)", "select charged constituents") + df_dict = dict() + for jet_var, params in vars.items(): + df_dict[jet_var] = df.Histo1D( + ( + "h_{}".format(jet_var), + ";{};N_{{Events}}".format(params["title"]), + params["bin"], + params["xmin"], + params["xmax"], + ), + jet_var, + ) + return df_dict + + +# _______________________________________________________________________________ +def dfhs_event(df, vars): + + ## extract charged particles + # df_charged = df.Filter("All(abs(pfcand_charge)>0)", "select charged constituents") + df_dict = dict() + for event_var, params in vars.items(): + print(event_var) + df_dict[event_var] = df.Histo1D( + ( + "h_{}".format(event_var), + ";{};N_{{Events}}".format(params["title"]), + params["bin"], + params["xmin"], + params["xmax"], + ), + event_var, + ) + return df_dict + + +# _______________________________________________________________________________ +def plot(sample_a, sample_b, histo_coll, var, params, outdir): + + dfh_a = sample_a[histo_coll][var].GetValue() + dfh_b = sample_b[histo_coll][var].GetValue() + + # Create canvas with pads for main plot and data/MC ratio + 
c = ROOT.TCanvas("c", "", 700, 750) + + ROOT.gStyle.SetOptStat(0) + upper_pad = ROOT.TPad("upper_pad", "", 0, 0.35, 1, 1) + lower_pad = ROOT.TPad("lower_pad", "", 0, 0, 1, 0.35) + for p in [upper_pad, lower_pad]: + p.SetLeftMargin(0.14) + p.SetRightMargin(0.05) + p.SetTickx(False) + p.SetTicky(False) + upper_pad.SetBottomMargin(0) + lower_pad.SetTopMargin(0) + lower_pad.SetBottomMargin(0.3) + upper_pad.Draw() + lower_pad.Draw() + + # Draw dfh_a + upper_pad.cd() + if params["scale"] == "log": + upper_pad.SetLogy() + dfh_a.SetMarkerStyle(20) + dfh_a.SetMarkerSize(0) + dfh_a.SetLineWidth(4) + dfh_a.SetLineColor(ROOT.kGreen + 2) + dfh_a.GetYaxis().SetLabelSize(0.045) + dfh_a.GetYaxis().SetTitleSize(0.05) + dfh_a.SetStats(0) + dfh_a.SetTitle("") + dfh_a.Draw("hist") + + # Draw dfh_b + dfh_b.SetLineColor(ROOT.kRed + 1) + dfh_b.SetLineStyle(2) + dfh_b.SetLineWidth(4) + dfh_b.Draw("hist SAME") + + # Draw ratio + lower_pad.cd() + + ratio = ROOT.TH1I( + "zero", + "", + params["bin"], + params["xmin"], + params["xmax"], + ) + ratio.SetLineColor(ROOT.kBlack) + ratio.SetLineStyle(2) + ratio.SetLineWidth(4) + ratio.SetMinimum(0.0) + ratio.SetMaximum(2.0) + ratio.GetXaxis().SetLabelSize(0.08) + ratio.GetXaxis().SetTitleSize(0.12) + ratio.GetXaxis().SetTitleOffset(1.0) + ratio.GetYaxis().SetLabelSize(0.08) + ratio.GetYaxis().SetTitleSize(0.09) + ratio.GetYaxis().SetTitle("ratio") + ratio.GetYaxis().CenterTitle() + ratio.GetYaxis().SetTitleOffset(0.7) + # ratio.GetYaxis().SetNdivisions(503, False) + ratio.GetYaxis().ChangeLabel(-1, -1, 0) + ratio.GetXaxis().SetTitle(params["title"]) + ratio.Draw("AXIS") + + ratiodata = dfh_a.Clone() + ratiodata.Sumw2() + ratiodata.Divide(dfh_b) + ratiodata.SetLineColor(ROOT.kBlack) + ratiodata.SetMarkerColor(ROOT.kBlack) + ratiodata.Draw("same e") + + # Add legend + upper_pad.cd() + legend = ROOT.TLegend(0.55, 0.68, 0.926, 0.85) + legend.SetTextFont(42) + legend.SetFillStyle(0) + legend.SetBorderSize(0) + legend.SetTextSize(0.045) + legend.SetTextAlign(12) + legend.AddEntry(dfh_a, "{} ({}-jets)".format(sample_a["label"], sample_a["flavor"]), "l") + legend.AddEntry(dfh_b, "{} ({}-jets)".format(sample_b["label"], sample_b["flavor"]), "l") + legend.Draw() + + # Add ATLAS label + text = ROOT.TLatex() + text.SetNDC() + text.SetTextFont(72) + text.SetTextSize(0.05) + text.DrawLatex(0.14, 0.91, "FCC-ee") + text.SetTextFont(42) + text.DrawLatex(0.27, 0.91, "(Delphes Simulation)") + text.SetTextSize(0.05) + text.DrawLatex(0.25, 0.78, "e^{+}e^{-} #rightarrow Z (#nu #nu) H (j j)") + text.SetTextSize(0.04) + text.DrawLatex(0.28, 0.71, "j = q, s, c, b, g") + text.SetTextSize(0.05) + text.SetTextAlign(31) + text.DrawLatex(0.95, 0.91, "#sqrt{s} = 240 GeV, 5 ab^{-1}") + + # Save the plot + figpath = "{}/{}_{}.png".format(outdir, sample_a["flavor"], var) + c.SaveAs(figpath) + + +# _______________________________________________________________________________________ +if __name__ == "__main__": + main() diff --git a/examples/basics/README.md b/examples/basics/README.md index b9db0c6c55..1cecc3d183 100644 --- a/examples/basics/README.md +++ b/examples/basics/README.md @@ -56,11 +56,36 @@ root[0] TBrowser b As shown in the screenshot above, there are two types of branches: - - Branches without a pound (#) in their name: Electron (1), Muon (2), AllMuon (3), EFlowNeutralHadron (4), Particle (5), Photon (6), ReconstructedParticles (7), EFlowPhoton (8), MCRecoAssociations (9), MissingET (10), ParticleIDs (11), Jet (12), EFlowTrack (13), EFlowTrack\_1 (14). They refer to collections of objects. 
+ - Branches without a pound (#) in their name refer to collections of objects: Electron (1), Muon (2), AllMuon (3), EFlowNeutralHadron (4), Particle (5), Photon (6), ReconstructedParticles (7), EFlowPhoton (8), MCRecoAssociations (9), MissingET (10), ParticleIDs (11), Jet (12), EFlowTrack (13), EFlowTrack\_1 (14).
   - NB: "Particle" denotes the collection of Monte-Carlo particles. "Muon" contains the isolated muons, while "AllMuon" contains all muons, isolated or not.
 - Branches with a pound in their name: Each of the object collections listed above, e.g. "Collection", has up to six associated collections of references, i.e. indices that point to another or to the same object collection. They are labeled Collection#i, with i = 0 ... 5. For example, the Muon collection has one single
-  associated collection of references, Muon#0.
+  associated collection of references, Muon#0.
+  - NB2: With `winter2023` samples, the `CollectionID`s are different. The correct list can be obtained using `podio-dump` or `collInfo` script from [AuxTools](https://github.com/HEP-FCC/AuxTools), e.g.:
+
+```
+./collInfo -i /eos/experiment/fcc/ee/generation/DelphesEvents/winter2023/IDEA/p8_ee_ZZ_ecm240/events_051628351.root
+
+ID -> Collection
+==================
+1 -> MissingET
+2 -> MCRecoAssociations
+3 -> ParticleIDs
+4 -> magFieldBz
+5 -> TrackerHits
+6 -> EFlowTrack
+7 -> CalorimeterHits
+8 -> Particle
+9 -> Photon
+10 -> EFlowTrack_L
+11 -> Electron
+12 -> EFlowPhoton
+13 -> EFlowNeutralHadron
+14 -> Jet
+15 -> ReconstructedParticles
+16 -> Muon
+==================
+```
 To figure out which collection is pointed to by Muon#0 (or by any other collection of references), one can look at the value of Muon#0.collectionID (see screenshot below). The collectionID of Muon#0 is the collection number 7 (in the example file used here), which, in the list of "object collections" above, corresponds to the collection of ReconstructedParticles.
diff --git a/setup.sh b/setup.sh
index f079dd5928..d5dd2e4f28 100644
--- a/setup.sh
+++ b/setup.sh
@@ -1,19 +1,32 @@
 if [ "${0}" != "${BASH_SOURCE}" ]; then
-  if [ -z "${KEY4HEP_STACK}" ]; then
+  # Determining the location of this setup script
+  export LOCAL_DIR=$(cd $(dirname "${BASH_SOURCE}") && pwd)
+
+  # Sourcing of the stack
+  if [ -n "${KEY4HEP_STACK}" ]; then
+    echo "INFO: Key4hep stack already set up."
+  elif [ -f "${LOCAL_DIR}/.fccana/stackpin" ]; then
+    STACK_PATH=$(<${LOCAL_DIR}/.fccana/stackpin)
+    echo "INFO: Sourcing pinned Key4hep stack..."
+    echo "      ${STACK_PATH}"
+    source ${STACK_PATH}
+  else
     source /cvmfs/fcc.cern.ch/sw/latest/setup.sh
+  fi
+
+  export PYTHONPATH=${LOCAL_DIR}:${PYTHONPATH}
+  export PYTHONPATH=${LOCAL_DIR}/python:${PYTHONPATH}
+  export PATH=${LOCAL_DIR}/bin:${PATH}
+  export LD_LIBRARY_PATH=${LOCAL_DIR}/install/lib:${LD_LIBRARY_PATH}
+  export CMAKE_PREFIX_PATH=${LOCAL_DIR}/install:${CMAKE_PREFIX_PATH}
+  export ROOT_INCLUDE_PATH=${LOCAL_DIR}/install/include:${ROOT_INCLUDE_PATH}
+
+  export ONNXRUNTIME_ROOT_DIR=`python -c "import onnxruntime; print(onnxruntime.__path__[0]+'/../../../..')" 2> /dev/null`
+  if [ -z "${ONNXRUNTIME_ROOT_DIR}" ]; then
+    echo "WARNING: ONNX Runtime not found! Related analyzers won't be built..."
   else
-    echo "INFO: Key4hep stack already set up."
+    export LD_LIBRARY_PATH=${ONNXRUNTIME_ROOT_DIR}/lib:${LD_LIBRARY_PATH}
   fi
-  export PYTHONPATH=$PWD:$PYTHONPATH
-  export PYTHONPATH=$PWD/python:$PYTHONPATH
-  export PATH=$PWD/bin:$PATH
-  export LD_LIBRARY_PATH=$PWD/install/lib:$LD_LIBRARY_PATH
-  export CMAKE_PREFIX_PATH=$PWD/install:$CMAKE_PREFIX_PATH
-  export ROOT_INCLUDE_PATH=$PWD/install/include:$ROOT_INCLUDE_PATH
-  export LOCAL_DIR=$PWD
-  export LD_LIBRARY_PATH=`python -m awkward.config --libdir`:$LD_LIBRARY_PATH
-  export ONNXRUNTIME_ROOT_DIR=`python -c "import onnxruntime; print(onnxruntime.__path__[0]+'/../../../..')"`
-  export LD_LIBRARY_PATH=$ONNXRUNTIME_ROOT_DIR/lib:$LD_LIBRARY_PATH
 else
   echo "ERROR: This script is meant to be sourced!"
 fi
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 2cf6289df2..dbb4110741 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -9,6 +9,7 @@ add_integration_test_2("examples/FCCee/higgs/mH-recoil/mumu/analysis_stage1.py")
 add_integration_test("examples/FCCee/higgs/mH-recoil/mumu/analysis_stage1.py")
 add_integration_test("examples/FCCee/flavour/Bc2TauNu/analysis_B2TauNu_truth.py")
 add_integration_test("examples/FCCee/test/jet_constituents.py")
+add_integration_test("examples/FCCee/vertex_lcfiplus/analysis_V0.py")
 # TODO: make this test run in the spack build environment
 #add_generic_test(build_new_case_study "tests/build_new_case_study.sh")
diff --git a/tests/unittest/algorithms.cpp b/tests/unittest/algorithms.cpp
index 0ea19c6e1b..9e5ae47808 100644
--- a/tests/unittest/algorithms.cpp
+++ b/tests/unittest/algorithms.cpp
@@ -3,6 +3,29 @@
 #include "catch2/catch_test_macros.hpp"
 #include
+TEST_CASE("sphericityFit", "[algorithms]") {
+  ROOT::VecOps::RVec<float> x{1., 0., -1};
+  ROOT::VecOps::RVec<float> y{0., 1., 0.};
+  ROOT::VecOps::RVec<float> z{0., 0., 0.};
+  FCCAnalyses::Algorithms::sphericityFit sphFit{x, y, z};
+  double params[] = {0., 1., 0.};
+  REQUIRE(sphFit(params) == Catch::Approx(1.));
+}
+
+TEST_CASE("minimize_sphericity", "[algorithms]") {
+  ROOT::VecOps::RVec<float> x{0., 1., 3., 7., 11., 3.};
+  ROOT::VecOps::RVec<float> y{0., -1., 3., -7., -11., .3};
+  ROOT::VecOps::RVec<float> z{5., -3., 1., 4., 2., -4};
+  auto res = FCCAnalyses::Algorithms::minimize_sphericity()(x, y, z);
+  REQUIRE(res[0] == Catch::Approx(.28065));
+  REQUIRE(res[1] == Catch::Approx(269.09445));
+  REQUIRE(res[2] == Catch::Approx(1994.81445));
+  REQUIRE(res[3] == Catch::Approx(-263.70053));
+  REQUIRE(res[4] == Catch::Approx(2012.12073));
+  REQUIRE(res[5] == Catch::Approx(77.21406));
+  REQUIRE(res[6] == Catch::Approx(721.20111));
+}
+
 TEST_CASE("Mass", "[algorithms]") {
   ROOT::VecOps::RVec<edm4hep::ReconstructedParticleData> pVec;
   edm4hep::ReconstructedParticleData p;