diff --git a/notebooks_tsqr/NightLog.ipynb b/notebooks_tsqr/NightLog.ipynb
new file mode 100644
index 0000000..705b68e
--- /dev/null
+++ b/notebooks_tsqr/NightLog.ipynb
@@ -0,0 +1,755 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "0",
+ "metadata": {},
+ "source": [
+ "# Night Log"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Parameters. Set defaults here.\n",
+ "# Times Square replaces this cell with the user's parameters.\n",
+ "record_limit = '999'\n",
+ "day_obs = 'TODAY' # TODAY, YESTERDAY, YYYY-MM-DD\n",
+ "number_of_days = '1' # Total number of days of data to display (ending on day_obs)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Only use packages available in the Rubin Science Platform\n",
+ "import requests\n",
+ "from collections import defaultdict\n",
+ "import pandas as pd\n",
+ "from pprint import pp\n",
+ "from urllib.parse import urlencode\n",
+ "from IPython.display import display, Markdown, display_markdown\n",
+ "from matplotlib import pyplot as plt\n",
+ "import os\n",
+ "from datetime import datetime, date, timedelta"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "limit = int(record_limit)\n",
+ "\n",
+ "match day_obs.lower():\n",
+ " case 'today':\n",
+ " date = datetime.now().date()\n",
+ " case 'yesterday':\n",
+ " date = datetime.now().date()-timedelta(days=1)\n",
+ " case _:\n",
+ " date = datetime.strptime(dd, '%Y-%m-%d').date()\n",
+ "\n",
+ "days = int(number_of_days)\n",
+ "\n",
+ "# Thus: [min_day_obs,max_day_obs)\n",
+ "min_day_obs = (date - timedelta(days=days-1)).strftime('%Y%m%d') # Inclusive\n",
+ "max_day_obs = (date + timedelta(days=1)).strftime('%Y%m%d') # prep for Exclusive\n",
+ "\n",
+ "response_timeout = 3.05 # seconds, how long to wait for connection\n",
+ "read_timeout = 20 # seconds\n",
+ "timeout = (float(response_timeout), float(read_timeout))\n",
+ "\n",
+ "summit = 'https://summit-lsp.lsst.codes'\n",
+ "usdf = 'https://usdf-rsp-dev.slac.stanford.edu'\n",
+ "tucson = 'https://tucson-teststand.lsst.codes'\n",
+ "\n",
+ "# Use server=tucson for dev testing\n",
+ "server = os.environ.get('EXTERNAL_INSTANCE_URL', summit)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f'Report from {server} over {number_of_days} nights'\n",
+ " f' from {min_day_obs} to {date}. ')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# For Times Square, comment out next line and past next cell with contents of local python file.\n",
+ "#! from lsst.ts.logging_and_reporting.source_adapters import ExposurelogAdapter, NarrativelogAdapter\n",
+ "# Once our logrep package has been installed in RSP, we can use the simpler \"import\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Comment out the import in the cell above this one.\n",
+ "# Paste contents of source_adapters.py in new cell below this one."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# This file is part of ts_logging_and_reporting.\n",
+ "#\n",
+ "# Developed for Vera C. Rubin Observatory Telescope and Site Systems.\n",
+ "# This product includes software developed by the LSST Project\n",
+ "# (https://www.lsst.org).\n",
+ "# See the COPYRIGHT file at the top-level directory of this distribution\n",
+ "# for details of code ownership.\n",
+ "#\n",
+ "# This program is free software: you can redistribute it and/or modify\n",
+ "# it under the terms of the GNU General Public License as published by\n",
+ "# the Free Software Foundation, either version 3 of the License, or\n",
+ "# (at your option) any later version.\n",
+ "#\n",
+ "# This program is distributed in the hope that it will be useful,\n",
+ "# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
+ "# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
+ "# GNU General Public License for more details.\n",
+ "#\n",
+ "# You should have received a copy of the GNU General Public License\n",
+ "# along with this program. If not, see .\n",
+ "\n",
+ "\n",
+ "############################################\n",
+ "# Python Standard Library\n",
+ "from urllib.parse import urlencode\n",
+ "import itertools\n",
+ "from datetime import datetime\n",
+ "from warnings import warn\n",
+ "from collections import defaultdict\n",
+ "\n",
+ "############################################\n",
+ "# External Packages\n",
+ "import requests\n",
+ "\n",
+ "MAX_CONNECT_TIMEOUT = 3.1 # seconds\n",
+ "MAX_READ_TIMEOUT = 90 * 60 # seconds\n",
+ "\n",
+ "class ApiAdapter:\n",
+ " # TODO document class including all class variables.\n",
+ " def __init__(self, *,\n",
+ " server_url='https://tucson-teststand.lsst.codes',\n",
+ " connect_timeout=1.05, # seconds\n",
+ " read_timeout=2, # seconds\n",
+ " ):\n",
+ " self.server = server_url\n",
+ " self.c_timeout = min(MAX_CONNECT_TIMEOUT,\n",
+ " float(connect_timeout)) # seconds\n",
+ " self.r_timeout = min(MAX_READ_TIMEOUT, # seconds\n",
+ " float(read_timeout))\n",
+ " self.timeout = (self.c_timeout, self.r_timeout)\n",
+ "\n",
+ " # We may be accessing several endpoints of an API.\n",
+ " # If so, we will get different types of records for each.\n",
+ " # The following are for the \"primary_endpoint\".\n",
+ " self.ignore_fields = list()\n",
+ " self.categoricals = list()\n",
+ " self.foreign_keys = list()\n",
+ "\n",
+ " service = None\n",
+ " endpoints = None\n",
+ "\n",
+ "\n",
+ " def check_endpoints(self, timeout=None):\n",
+ " to = (timeout or self.timeout)\n",
+ " print(f'Try connect to each endpoint of {self.server}/{self.service} '\n",
+ " f'using timeout={to}.')\n",
+ " url_http_status_code = dict()\n",
+ " for ep in self.endpoints:\n",
+ " url = f'{self.server}/{self.service}/{ep}'\n",
+ " try:\n",
+ " r = requests.get(url, timeout=(timeout or self.timeout))\n",
+ " except:\n",
+ " url_http_status_code[url] = 'timeout'\n",
+ " else:\n",
+ " url_http_status_code[url] = r.status_code\n",
+ " return url_http_status_code\n",
+ "\n",
+ "\n",
+ " def analytics(self, recs, categorical_fields=None):\n",
+ " if len(recs) == 0:\n",
+ " return dict(fields=[],\n",
+ " facet_fields=set(),\n",
+ " facets=dict())\n",
+ "\n",
+ " non_cats = set([\n",
+ " 'tags', 'urls', 'message_text', 'id', 'date_added',\n",
+ " 'obs_id', 'day_obs', 'seq_num', 'parent_id', 'user_id',\n",
+ " 'date_invalidated', 'date_begin', 'date_end',\n",
+ " 'time_lost', # float\n",
+ " # 'systems','subsystems','cscs', # values need special handling\n",
+ " ])\n",
+ " flds = set(recs[0].keys())\n",
+ " if not categorical_fields:\n",
+ " categorical_fields = flds\n",
+ " ignore_fields = flds - categorical_fields\n",
+ " facflds = flds - ignore_fields\n",
+ "\n",
+ " # facets(field) = set(value-1, value-2, ...)\n",
+ " facets = {fld: set([str(r[fld])\n",
+ " for r in recs if not isinstance(r[fld], list)])\n",
+ " for fld in facflds}\n",
+ " return dict(fields=flds,\n",
+ " facet_fields=facflds,\n",
+ " facets=facets,\n",
+ " )\n",
+ "\n",
+ "\n",
+ "# Not available on SLAC (usdf) as of 9/9/2024.\n",
+ "class NightReportAdapter(ApiAdapter):\n",
+ " service = \"nightreport\"\n",
+ " endpoints = ['reports']\n",
+ " primary_endpoint = 'reports'\n",
+ "\n",
+ "class NarrativelogAdapter(ApiAdapter):\n",
+ " \"\"\"TODO full documentation\n",
+ " \"\"\"\n",
+ " service = 'narrativelog'\n",
+ " endpoints = ['messages',]\n",
+ "\n",
+ "\n",
+ " primary_endpoint = 'messages'\n",
+ " fields = {'category',\n",
+ " 'components',\n",
+ " 'cscs',\n",
+ " 'date_added',\n",
+ " 'date_begin',\n",
+ " 'date_end',\n",
+ " 'date_invalidated',\n",
+ " 'id',\n",
+ " 'is_human',\n",
+ " 'is_valid',\n",
+ " 'level',\n",
+ " 'message_text',\n",
+ " 'parent_id',\n",
+ " 'primary_hardware_components',\n",
+ " 'primary_software_components',\n",
+ " 'site_id',\n",
+ " 'subsystems',\n",
+ " 'systems',\n",
+ " 'tags',\n",
+ " 'time_lost',\n",
+ " 'time_lost_type',\n",
+ " 'urls',\n",
+ " 'user_agent',\n",
+ " 'user_id'}\n",
+ " filters = {\n",
+ " 'site_ids',\n",
+ " 'message_text', # Message text contain ...\n",
+ " 'min_level', # inclusive\n",
+ " 'max_level', # exclusive\n",
+ " 'user_ids',\n",
+ " 'user_agents',\n",
+ " 'categories',\n",
+ " 'exclude_categories',\n",
+ " 'time_lost_types',\n",
+ " 'exclude_time_lost_types',\n",
+ " 'tags', # at least one must be present.\n",
+ " 'exclude_tags', # all must be absent\n",
+ " 'systems',\n",
+ " 'exclude_systems',\n",
+ " 'subsystems',\n",
+ " 'exclude_subsystems',\n",
+ " 'cscs',\n",
+ " 'exclude_cscs',\n",
+ " 'components',\n",
+ " 'exclude_components',\n",
+ " 'primary_software_components',\n",
+ " 'exclude_primary_software_components',\n",
+ " 'primary_hardware_components',\n",
+ " 'exclude_primary_hardware_components',\n",
+ " 'urls',\n",
+ " 'min_time_lost',\n",
+ " 'max_time_lost',\n",
+ " 'has_date_begin',\n",
+ " 'min_date_begin',\n",
+ " 'max_date_begin',\n",
+ " 'has_date_end',\n",
+ " 'min_date_end',\n",
+ " 'max_date_end',\n",
+ " 'is_human', # Allowed: either, true, false; Default=either\n",
+ " 'is_valid', # Allowed: either, true, false; Default=true\n",
+ " 'min_date_added', # inclusive, TAI ISO string, no TZ\n",
+ " 'max_date_added', # exclusive, TAI ISO string, no TZ\n",
+ " 'has_date_invalidated',\n",
+ " 'min_date_invalidated',\n",
+ " 'max_date_invalidated',\n",
+ " 'has_parent_id',\n",
+ " 'order_by',\n",
+ " 'offset',\n",
+ " 'limit'\n",
+ " }\n",
+ "\n",
+ " def get_messages(self,\n",
+ " site_ids=None,\n",
+ " message_text=None,\n",
+ " min_date_end=None,\n",
+ " max_date_end=None,\n",
+ " is_human='either',\n",
+ " is_valid='either',\n",
+ " offset=None,\n",
+ " limit=None,\n",
+ " outfields=None,\n",
+ " ):\n",
+ " qparams = dict(is_human=is_human, is_valid=is_valid)\n",
+ " if site_ids:\n",
+ " qparams['site_ids'] = site_ids\n",
+ " if message_text:\n",
+ " qparams['message_text'] = message_text\n",
+ " if min_date_end:\n",
+ " qparams['min_date_end'] = min_date_end\n",
+ " if max_date_end:\n",
+ " qparams['max_date_end'] = max_date_end\n",
+ " if limit:\n",
+ " qparams['limit'] = limit\n",
+ "\n",
+ " qstr = urlencode(qparams)\n",
+ " url = f'{self.server}/{self.service}/messages?{qstr}'\n",
+ " try:\n",
+ " recs = requests.get(url, timeout=self.timeout).json()\n",
+ " except Exception as err:\n",
+ " warn(f'No {self.service} records retrieved: {err}')\n",
+ " recs = []\n",
+ " if len(recs) == 0:\n",
+ " raise Exception(f'No records retrieved from {url}')\n",
+ "\n",
+ " if recs:\n",
+ " recs.sort(key=lambda r: r['date_begin'])\n",
+ " self.recs = recs\n",
+ " return recs\n",
+ "\n",
+ " def get_timelost(self, rollup='day'):\n",
+ " day_tl = dict() # day_tl[day] = totalDayTimeLost\n",
+ " for day,dayrecs in itertools.groupby(\n",
+ " self.recs,\n",
+ " key=lambda r: datetime.fromisoformat(r['date_begin']).date().isoformat()\n",
+ " ):\n",
+ " day_tl[day] = sum([r['time_lost'] for r in dayrecs])\n",
+ " return day_tl\n",
+ "\n",
+ "class ExposurelogAdapter(ApiAdapter):\n",
+ " \"\"\"TODO full documentation\n",
+ "\n",
+ " EXAMPLES:\n",
+ " gaps,recs = logrep_utils.ExposurelogAdapter(server_url='https://usdf-rsp-dev.slac.stanford.edu').get_observation_gaps('LSSTComCam')\n",
+ " gaps,recs = logrep_utils.ExposurelogAdapter(server_url='[[https://tucson-teststand.lsst.codes').get_observation_gaps('LSSTComCam')\n",
+ " \"\"\"\n",
+ " ignore_fields = ['id']\n",
+ " service = 'exposurelog'\n",
+ " endpoints = [\n",
+ " 'instruments',\n",
+ " 'exposures',\n",
+ " 'messages',\n",
+ " ]\n",
+ " primary_endpoint = 'messages'\n",
+ " fields = {'date_added',\n",
+ " 'date_invalidated',\n",
+ " 'day_obs',\n",
+ " 'exposure_flag',\n",
+ " 'id',\n",
+ " 'instrument',\n",
+ " 'is_human',\n",
+ " 'is_valid',\n",
+ " 'level',\n",
+ " 'message_text',\n",
+ " 'obs_id',\n",
+ " 'parent_id',\n",
+ " 'seq_num',\n",
+ " 'site_id',\n",
+ " 'tags',\n",
+ " 'urls',\n",
+ " 'user_agent',\n",
+ " 'user_id'}\n",
+ " filters = {\n",
+ " 'site_ids',\n",
+ " 'obs_id',\n",
+ " 'instruments',\n",
+ " 'min_day_obs', # inclusive, integer in form YYYMMDD\n",
+ " 'max_day_obs', # exclusive, integer in form YYYMMDD\n",
+ " 'min_seq_num',\n",
+ " 'max_seq_num',\n",
+ " 'message_text', # Message text contain ...\n",
+ " 'min_level', # inclusive\n",
+ " 'max_level', # exclusive\n",
+ " 'tags', # at least one must be present.\n",
+ " 'urls',\n",
+ " 'exclude_tags', # all must be absent\n",
+ " 'user_ids',\n",
+ " 'user_agents',\n",
+ " 'is_human', # Allowed: either, true, false; Default=either\n",
+ " 'is_valid', # Allowed: either, true, false; Default=true\n",
+ " 'exposure_flags',\n",
+ " 'min_date_added', # inclusive, TAI ISO string, no TZ\n",
+ " 'max_date_added', # exclusive, TAI ISO string, no TZ\n",
+ " 'has_date_invalidated',\n",
+ " 'min_date_invalidated',\n",
+ " 'max_date_invalidated',\n",
+ " 'has_parent_id',\n",
+ " 'order_by',\n",
+ " 'offset',\n",
+ " 'limit'\n",
+ " }\n",
+ "\n",
+ "\n",
+ " def check_endpoints(self, timeout=None):\n",
+ " to = (timeout or self.timeout)\n",
+ " print(f'Try connect to each endpoint of {self.server}/{self.service} '\n",
+ " f'using timeout={to}.')\n",
+ " url_http_status_code = dict()\n",
+ "\n",
+ " for ep in self.endpoints:\n",
+ " qstr = '?instrument=na' if ep == 'exposures' else ''\n",
+ " url = f'{self.server}/{self.service}/{ep}{qstr}'\n",
+ " try:\n",
+ " r = requests.get(url, timeout=to)\n",
+ " except:\n",
+ " url_http_status_code[url] = 'timeout'\n",
+ " else:\n",
+ " url_http_status_code[url] = r.status_code\n",
+ " return url_http_status_code\n",
+ "\n",
+ "\n",
+ " def get_instruments(self):\n",
+ " url = f'{self.server}/{self.service}/instruments'\n",
+ " try:\n",
+ " instruments = requests.get(url, timeout=self.timeout).json()\n",
+ " except Exception as err:\n",
+ " warn(f'No instruments retrieved: {err}')\n",
+ " instruments = dict(dummy=[])\n",
+ " # Flatten the lists\n",
+ " return list(itertools.chain.from_iterable(instruments.values()))\n",
+ "\n",
+ " def get_exposures(self, instrument, registry=1):\n",
+ " qparams = dict(instrument=instrument, registery=registry)\n",
+ " url = f'{self.server}/{self.service}/exposures?{urlencode(qparams)}'\n",
+ " try:\n",
+ " recs = requests.get(url, timeout=self.timeout).json()\n",
+ " except Exception as err:\n",
+ " warn(f'No exposures retrieved: {err}')\n",
+ " recs = []\n",
+ " return recs\n",
+ "\n",
+ " def get_messages(self,\n",
+ " site_ids=None,\n",
+ " obs_ids=None,\n",
+ " instruments=None,\n",
+ " message_text=None,\n",
+ " min_day_obs=None,\n",
+ " max_day_obs=None,\n",
+ " is_human='either',\n",
+ " is_valid='either',\n",
+ " exposure_flags=None,\n",
+ " offset=None,\n",
+ " limit=None,\n",
+ " ):\n",
+ " qparams = dict(is_human=is_human, is_valid=is_valid)\n",
+ " if site_ids:\n",
+ " qparams['site_ids'] = site_ids\n",
+ " if obs_ids:\n",
+ " qparams['obs_ids'] = obs_ids\n",
+ " if instruments:\n",
+ " qparams['instruments'] = instruments\n",
+ " if min_day_obs:\n",
+ " qparams['min_day_obs'] = min_day_obs\n",
+ " if max_day_obs:\n",
+ " qparams['max_day_obs'] = max_day_obs\n",
+ " if exposure_flags:\n",
+ " qparams['exposure_flags'] = exposure_flags\n",
+ " if offset:\n",
+ " qparams['offset'] = offset\n",
+ " if limit:\n",
+ " qparams['limit'] = limit\n",
+ "\n",
+ " qstr = urlencode(qparams)\n",
+ " url = f'{self.server}/{self.service}/messages?{qstr}'\n",
+ " recs = []\n",
+ " try:\n",
+ " response = requests.get(url, timeout=self.timeout)\n",
+ " recs = response.json()\n",
+ " except Exception as err:\n",
+ " warnings.warn(f'No {self.service} records retrieved: {err}')\n",
+ "\n",
+ " if len(recs) == 0:\n",
+ " warn(f'No records retrieved from {url}')\n",
+ "\n",
+ " if recs:\n",
+ " recs.sort(key=lambda r: r['day_obs'])\n",
+ " self.recs = recs\n",
+ " return recs\n",
+ "\n",
+ " def get_observation_gaps(self, instruments=None,\n",
+ " min_day_obs=None, # YYYYMMDD\n",
+ " max_day_obs=None, # YYYYMMDD\n",
+ " ):\n",
+ " if not instruments:\n",
+ " instruments = self.get_instruments()\n",
+ " assert isinstance(instruments,list), \\\n",
+ " f'\"instruments\" must be a list. Got {instruments!r}'\n",
+ " # inst_day_rollupol[instrument] => dict[day] => exposureGapInMinutes\n",
+ " inst_day_rollup = defaultdict(dict) # Instrument/Day rollup\n",
+ "\n",
+ " for instrum in instruments:\n",
+ " recs = self.get_exposures(instrum)\n",
+ " instrum_gaps = dict()\n",
+ " for day,dayrecs in itertools.groupby(recs,\n",
+ " key=lambda r: r['day_obs']):\n",
+ " gaps = list()\n",
+ " begin = end = None\n",
+ " for rec in dayrecs:\n",
+ " begin = rec['timespan_begin']\n",
+ " if end:\n",
+ " # span in minutes\n",
+ " diff = (datetime.fromisoformat(begin)\n",
+ " - datetime.fromisoformat(end)\n",
+ " ).total_seconds() / 60.0\n",
+ "\n",
+ " gaps.append((\n",
+ " datetime.fromisoformat(end).time().isoformat(),\n",
+ " datetime.fromisoformat(begin).time().isoformat(),\n",
+ " diff\n",
+ " ))\n",
+ " end = rec['timespan_end']\n",
+ " instrum_gaps[day] = gaps\n",
+ "\n",
+ " #!roll = dict()\n",
+ " # Rollup gap times by day\n",
+ " for day,tuples in instrum_gaps.items():\n",
+ " #!roll[day] = sum([t[2] for t in tuples])\n",
+ " inst_day_rollup[instrum][day] = sum([t[2] for t in tuples])\n",
+ "\n",
+ " return inst_day_rollup\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "class Dashboard: # TODO Complete and move to its own file.\n",
+ " \"\"\"Verify that we can get to all the API endpoints and databases we need for\n",
+ " any of our sources.\n",
+ " \"\"\"\n",
+ "\n",
+ " envs = dict(\n",
+ " summit = 'https://summit-lsp.lsst.codes',\n",
+ " usdf_dev = 'https://usdf-rsp-dev.slac.stanford.edu',\n",
+ " tucson = 'https://tucson-teststand.lsst.codes',\n",
+ " # Environments not currently used:\n",
+ " # rubin_usdf_dev = '',\n",
+ " # data_lsst_cloud = '',\n",
+ " # usdf = '',\n",
+ " # base_data_facility = '',\n",
+ " # rubin_idf_int = '',\n",
+ " )\n",
+ " adapters = [ExposurelogAdapter,\n",
+ " NarrativelogAdapter,\n",
+ " # NightReportAdapter, # TODO\n",
+ " ]\n",
+ "\n",
+ " def report(self, timeout=None):\n",
+ " url_status = dict()\n",
+ " for env,server in self.envs.items():\n",
+ " for adapter in self.adapters:\n",
+ " service = adapter(server_url=server)\n",
+ " # url_status[endpoint_url] = http_status_code\n",
+ " url_status.update(service.check_endpoints(timeout=timeout))\n",
+ " total_cnt = 0\n",
+ " good_cnt = 0\n",
+ " good = list()\n",
+ " print('\\nStatus for each endpoint URL:')\n",
+ " for url,stat in url_status.items():\n",
+ " print(f'{stat}\\t{url}')\n",
+ " total_cnt += 1\n",
+ " if stat == 200:\n",
+ " good_cnt += 1\n",
+ " good.append(url)\n",
+ " print(f'\\nConnected to {good_cnt} out of {total_cnt} endpoints.')\n",
+ " return good_cnt, good\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8",
+ "metadata": {},
+ "source": [
+ "# Exposure Log"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "service_adapter = ExposurelogAdapter(server_url=server)\n",
+ "source = f'{server}'\n",
+ "try:\n",
+ " exposure_recs = service_adapter.get_messages(limit=limit,\n",
+ " min_day_obs=min_day_obs,\n",
+ " max_day_obs=max_day_obs,\n",
+ " )\n",
+ "except Exception as err:\n",
+ " exposure_recs = []\n",
+ " msg = f'ERROR getting records from {server=}: {err=}'\n",
+ " raise Exception(msg)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "10",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f'Retrieved {len(exposure_recs)} records')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "11",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if exposure_recs:\n",
+ " new_column_names = dict(message_text='message',\n",
+ " date_added='date'\n",
+ " )\n",
+ " df = pd.DataFrame(exposure_recs).rename(columns=new_column_names)\n",
+ " user_df = df[['date','message']]\n",
+ " \n",
+ " display_markdown(f'### Exposure log for {number_of_days} days {min_day_obs} to {max_day_obs}', raw=True)\n",
+ " display(user_df)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "12",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gaps = service_adapter.get_observation_gaps()\n",
+ "for instrument, day_gaps in gaps.items():\n",
+ " display_markdown(f'### Date vs Observation Gap (minutes) for {instrument=!s}', raw=True)\n",
+ " x,y = zip(*day_gaps.items())\n",
+ " df = pd.DataFrame(dict(day=x,minutes=y))\n",
+ " df.plot.bar(x='day', y='minutes', title=f'{instrument=!s}')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "13",
+ "metadata": {},
+ "source": [
+ "# Narrative Log\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "14",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "service_adapter = NarrativelogAdapter(server_url=server)\n",
+ "try:\n",
+ " # date like '2000-01-02 12:00:00'\n",
+ " # str(datetime(2000, 1, 2, 12, 0, 0))\n",
+ " narrative_recs = service_adapter.get_messages(limit=limit,\n",
+ " min_date_end=str(datetime.strptime(min_day_obs,'%Y%m%d')),\n",
+ " max_date_end=str(datetime.strptime(max_day_obs,'%Y%m%d')),\n",
+ " )\n",
+ "except Exception as err:\n",
+ " narrative_recs = []\n",
+ " msg = f'ERROR getting records from {server=}: {err=}'\n",
+ " raise Exception(msg)\n",
+ "\n",
+ "print(f'Retrieved {len(narrative_recs)} records.')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "15",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ignore_fields = ['id']\n",
+ "new_column_names = dict(message_text='message',\n",
+ " date_added='date'\n",
+ " )\n",
+ "if narrative_recs:\n",
+ " df = pd.DataFrame(narrative_recs).rename(columns=new_column_names)\n",
+ " user_df = df[['date','message']]\n",
+ "\n",
+ " display(Markdown(f'## Narrative log (Style A) for {number_of_days} days {min_day_obs} to {max_day_obs}'))\n",
+ " display(Markdown(\"### Choose display Style (or offer other suggestion)\"))\n",
+ " with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n",
+ " display(user_df)\n",
+ " \n",
+ " display(Markdown(f'## Narrative log (Style B)'))\n",
+ " for index,row in user_df.iterrows():\n",
+ " print(f\"{datetime.fromisoformat(user_df.iloc[0]['date']).date()}: \"\n",
+ " f\"{row['message']}\"\n",
+ " )"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "16",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.12"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks_tsqr/NightLog.yaml b/notebooks_tsqr/NightLog.yaml
new file mode 100644
index 0000000..79cecb0
--- /dev/null
+++ b/notebooks_tsqr/NightLog.yaml
@@ -0,0 +1,29 @@
+# For use with a Times Square notebook
+title: Night Log
+description: Combined report from Summit logs
+authors:
+ - name: Steve Pothier
+ slack: Steve Pothier
+ - name: Valerie Becker
+ slack: valerie becker
+tags:
+ - reporting
+ - prototype
+ - exposure
+parameters:
+ record_limit:
+ type: integer
+ description: Max number of records to output
+ default: 99
+ minimum: 1
+ maximum: 9999
+ day_obs:
+ type: string
+ description: The night to report on. (YYYY-MM-DD, TODAY, YESTERDAY)
+ default: "TODAY"
+ number_of_days:
+ type: integer
+    description: Number of days to show (ending on day_obs)
+ default: 1
+ minimum: 1
+ maximum: 9
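+# Hypothetical example: Times Square exposes these parameters as URL query
+# arguments, e.g. ?record_limit=500&day_obs=2024-09-09&number_of_days=3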
diff --git a/notebooks_tsqr/logrep_proto_1.yaml b/notebooks_tsqr/logrep_proto_1.yaml
deleted file mode 100644
index ada7411..0000000
--- a/notebooks_tsqr/logrep_proto_1.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-# For use with a Times Square notebook
-title: LR mix
-description: Prototype 1
-authors:
- - name: Steve Pothier
- slack: Steve Pothier
-tags:
- - reporting
- - prototype
-parameters:
- record_limit:
- type: integer
- description: Max number of records to output
- default: 99
- minimum: 1
- maximum: 9999
diff --git a/notebooks_tsqr/sources_dashboard.ipynb b/notebooks_tsqr/sources_dashboard.ipynb
index 4036bdf..eb05fca 100644
--- a/notebooks_tsqr/sources_dashboard.ipynb
+++ b/notebooks_tsqr/sources_dashboard.ipynb
@@ -1,303 +1,50 @@
{
"cells": [
- {
- "cell_type": "markdown",
- "id": "0",
- "metadata": {},
- "source": [
- "# Logging and Reporting"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "1",
- "metadata": {},
- "source": [
- "## Table of contents\n",
- "* [Parameters](#params)\n",
- "* [Imports and setup](#imports)\n",
- "* [Try every server](#every-server)\n",
- "* [Report](#report)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "2",
- "metadata": {},
- "source": [
- "\n",
- "## Parameters\n",
- "The first code cell must contain parameters with string values for compatibility with Times Square.\n",
- "\n",
- "See: https://rsp.lsst.io/v/usdfdev/guides/times-square/index.html"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3",
- "metadata": {},
- "outputs": [],
- "source": [
- "#Parameters\n",
- "env = 'tucson' # usdf-dev, tucson, slac, summit\n",
- "record_limit = '9999'\n",
- "response_timeout = '3.05' # seconds, how long to wait for connection\n",
- "read_timeout = '20' # seconds"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "4",
- "metadata": {},
- "source": [
- "\n",
- "## Imports and General Setup"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "5",
- "metadata": {},
- "outputs": [],
- "source": [
- "import requests\n",
- "from collections import defaultdict\n",
- "import pandas as pd\n",
- "from pprint import pp"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "6",
- "metadata": {},
- "outputs": [],
- "source": [
- "limit = int(record_limit)\n",
- "timeout = (float(response_timeout), float(read_timeout))\n",
- "\n",
- "# Env list comes from drop-down menu top of:\n",
- "# https://rsp.lsst.io/v/usdfdev/guides/times-square/\n",
- "envs = dict(\n",
- " #rubin_usdf_dev = '',\n",
- " #data_lsst_cloud = '',\n",
- " #usdf = '',\n",
- " #base_data_facility = '',\n",
- " summit = 'https://summit-lsp.lsst.codes',\n",
- " usdf_dev = 'https://usdf-rsp-dev.slac.stanford.edu',\n",
- " #rubin_idf_int = '',\n",
- " tucson = 'https://tucson-teststand.lsst.codes',\n",
- ")\n",
- "envs"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "7",
- "metadata": {},
- "source": [
- "\n",
- "## Try to access every Server, every Log in our list\n",
- "We call the combination of a specific Server and specific Log a \"service\".\n",
- "This is a First Look. As such, we don't try to get a useful list of records. \n",
- "Instead, we save a few pieces of data from each service. A more tailored web-service call should be done to get useful records. For each service, we save:\n",
- "1. The number of records retrieved\n",
- "1. The list of fields found in a record (we assume all records from a service have the same fields)\n",
- "1. An example of 1-2 records.\n",
- "1. The [Facets](https://en.wikipedia.org/wiki/Faceted_search) of the service for all service fields that are not explictly excluded."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "8",
- "metadata": {},
- "outputs": [],
- "source": [
- "verbose=False\n",
- "fields = defaultdict(set) # fields[(env,log)] = {field1, field2, ...}\n",
- "examples = defaultdict(list) # examples[(env,log)] = [rec1, rec2]\n",
- "results = defaultdict(dict) # results[(env,log)] = dict(server,url, ok, numfields, numrecs)\n",
- "facets = defaultdict(dict) # facets[(env,log)] = dict(field) = set(value-1, value-2, ...)\n",
- "\n",
- "# Dumb! Using same ignore set for all LOGS.\n",
- "ignore_fields = set(['tags', 'urls', 'message_text', 'id', 'date_added', \n",
- " 'obs_id', 'day_obs', 'seq_num', 'parent_id', 'user_id',\n",
- " 'date_invalidated', 'date_begin', 'date_end',\n",
- " 'time_lost', # float\n",
- " #'systems','subsystems','cscs', # values are lists, special handling\n",
- " ])\n",
- "for env,server in envs.items():\n",
- " ok = True\n",
- " try:\n",
- " recs = None\n",
- " log = 'exposurelog'\n",
- " #!url = f'{server}/{log}/messages?is_human=either&is_valid=either&offset=0&{limit=}'\n",
- " url = f'{server}/{log}/messages?is_human=either&is_valid=either&{limit=}'\n",
- " print(f'\\nAttempt to get logs from {url=}')\n",
- " response = requests.get(url, timeout=timeout)\n",
- " response.raise_for_status()\n",
- " recs = response.json()\n",
- " flds = set(recs[0].keys())\n",
- " if verbose:\n",
- " print(f'Number of {log} records: {len(recs):,}')\n",
- " print(f'Got {log} fields: {flds}')\n",
- " print(f'Example record: {recs[0]}') \n",
- " fields[(env,log)] = flds\n",
- " examples[(env,log)] = recs[:2] \n",
- "\n",
- " facflds = flds - ignore_fields\n",
- " # Fails when r[fld] is a LIST instead of singleton\n",
- " # I think when that happens occasionaly, its a BUG in the data! It happens.\n",
- " facets[(env,log)] = {fld: set([str(r[fld])\n",
- " for r in recs if not isinstance(r[fld], list)]) \n",
- " for fld in facflds}\n",
- " except Exception as err:\n",
- " ok = False\n",
- " print(f'ERROR getting {log} from {env=} using {url=}: {err=}')\n",
- " numf = len(flds) if ok else 0\n",
- " numr = len(recs) if ok else 0\n",
- " results[(env,log)] = dict(ok=ok, server=server, url=url,numfields=numf, numrecs=numr)\n",
- "\n",
- " print()\n",
- " try:\n",
- " recs = None\n",
- " log = 'narrativelog'\n",
- " #! url = f'{server}/{log}/messages?is_human=either&is_valid=true&offset=0&{limit=}'\n",
- " url = f'{server}/{log}/messages?is_human=either&is_valid=either&{limit=}'\n",
- " print(f'\\nAttempt to get logs from {url=}')\n",
- " response = requests.get(url, timeout=timeout)\n",
- " response.raise_for_status()\n",
- " recs = response.json()\n",
- " flds = set(recs[0].keys())\n",
- " if verbose:\n",
- " print(f'Number of {log} records: {len(recs):,}')\n",
- " print(f'Got {log} fields: {flds}')\n",
- " print(f'Example record: {recs[0]}')\n",
- " fields[(env,log)] = flds \n",
- " examples[(env,log)] = recs[:2] \n",
- "\n",
- " facflds = flds - ignore_fields\n",
- " # Fails when r[fld] is a LIST instead of singleton\n",
- " # I think when that happens occasionaly, its a BUG in the data! It happens.\n",
- " # Look for BAD facet values like: {'None', None}\n",
- " facets[(env,log)] = {fld: set([r[fld] \n",
- " for r in recs if not isinstance(r[fld], list)]) \n",
- " for fld in facflds}\n",
- " except Exception as err:\n",
- " ok = False\n",
- " print(f'ERROR getting {log} from {env=} using {url=}: {err=}')\n",
- " numf = len(flds) if ok else 0\n",
- " numr = len(recs) if ok else 0\n",
- " results[(env,log)] = dict(ok=ok, server=server, url=url,numfields=numf, numrecs=numr)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "9",
- "metadata": {},
- "source": [
- "\n",
- "## Report\n",
- "This is a silly report that may be useful for developers. Not so much for astronomers."
- ]
- },
- {
- "cell_type": "markdown",
- "id": "10",
- "metadata": {},
- "source": [
- "\n",
- "### Success/Failure table"
- ]
- },
{
"cell_type": "code",
"execution_count": null,
- "id": "11",
+ "id": "0",
"metadata": {},
"outputs": [],
"source": [
- "show_columns = ['ok', 'server', 'numfields', 'numrecs']\n",
- "df = pd.DataFrame(data=dict(results)).T.loc[:,show_columns]\n",
- "print(f'Got results from {df[\"ok\"].values.sum()} of {len(df)} env/logs')\n",
- "df"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "12",
- "metadata": {},
- "source": [
- "\n",
- "### Field Names"
+ "from lsst.ts.logging_and_reporting.source_adapters import Dashboard"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "13",
+ "id": "1",
"metadata": {},
"outputs": [],
"source": [
- "print('Field names for each Environment/Log source:')\n",
- "for (env,log),flds in fields.items():\n",
- " field_names = ', '.join(flds)\n",
- " print(f'\\n{env}/{log}: {field_names}')\n",
- "#!dict(fields)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "14",
- "metadata": {},
- "source": [
- "\n",
- "### Facets"
+ "status = Dashboard().report(timeout=0.5)"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "15",
+ "id": "2",
"metadata": {},
"outputs": [],
"source": [
- "dict(facets)\n",
- "for (env,log),flds in facets.items():\n",
- " print(f'{env}/{log}:')\n",
- " for fld,vals in flds.items():\n",
- " print(f' {fld}: \\t{vals}')"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "16",
- "metadata": {},
- "source": [
- "\n",
- "### Example Records"
+ "status"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "17",
+ "id": "3",
"metadata": {},
"outputs": [],
"source": [
- "for (env,log),recs in examples.items():\n",
- " print(f'\\n{env=}, {log=}: ')\n",
- " print(' Example records: ')\n",
- " pp(recs)"
+ "successes = [k for k,v in status.items() if v == 200]\n",
+ "successes"
]
},
{
"cell_type": "code",
"execution_count": null,
- "id": "18",
+ "id": "4",
"metadata": {},
"outputs": [],
"source": []
diff --git a/python/lsst/ts/logging_and_reporting/logrep_utils.py b/python/lsst/ts/logging_and_reporting/logrep_utils.py
deleted file mode 100644
index 48edc3c..0000000
--- a/python/lsst/ts/logging_and_reporting/logrep_utils.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# This file is part of ts_logging_and_reporting.
-#
-# Developed for Vera C. Rubin Observatory Telescope and Site Systems.
-# This product includes software developed by the LSST Project
-# (https://www.lsst.org).
-# See the COPYRIGHT file at the top-level directory of this distribution
-# for details of code ownership.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-
-from urllib.parse import urlencode
-import itertools
-from datetime import datetime
-import warnings
-from collections import defaultdict
diff --git a/python/lsst/ts/logging_and_reporting/source_adapters.py b/python/lsst/ts/logging_and_reporting/source_adapters.py
index 37c67fd..761aa62 100644
--- a/python/lsst/ts/logging_and_reporting/source_adapters.py
+++ b/python/lsst/ts/logging_and_reporting/source_adapters.py
@@ -25,21 +25,22 @@
from urllib.parse import urlencode
import itertools
from datetime import datetime
-import warnings
+from warnings import warn
from collections import defaultdict
+
############################################
# External Packages
import requests
-
MAX_CONNECT_TIMEOUT = 3.1 # seconds
MAX_READ_TIMEOUT = 90 * 60 # seconds
class ApiAdapter:
+ # TODO document class including all class variables.
def __init__(self, *,
server_url='https://tucson-teststand.lsst.codes',
- connect_timeout=3.05, # seconds
- read_timeout=10 * 60, # seconds
+ connect_timeout=1.05, # seconds
+ read_timeout=2, # seconds
):
self.server = server_url
self.c_timeout = min(MAX_CONNECT_TIMEOUT,
@@ -55,8 +56,32 @@ def __init__(self, *,
self.categoricals = list()
self.foreign_keys = list()
+ service = None
+ endpoints = None
+
+
+ def check_endpoints(self, timeout=None):
+ to = (timeout or self.timeout)
+        print(f'Try connecting to each endpoint of {self.server}/{self.service} '
+ f'using timeout={to}.')
+ url_http_status_code = dict()
+ for ep in self.endpoints:
+ url = f'{self.server}/{self.service}/{ep}'
+ try:
+            r = requests.get(url, timeout=to)
+        except Exception:
+ url_http_status_code[url] = 'timeout'
+ else:
+ url_http_status_code[url] = r.status_code
+ return url_http_status_code
+
def analytics(self, recs, categorical_fields=None):
+ if len(recs) == 0:
+ return dict(fields=[],
+ facet_fields=set(),
+ facets=dict())
+
non_cats = set([
'tags', 'urls', 'message_text', 'id', 'date_added',
'obs_id', 'day_obs', 'seq_num', 'parent_id', 'user_id',
@@ -80,8 +105,19 @@ def analytics(self, recs, categorical_fields=None):
)
+# Not available on SLAC (usdf) as of 9/9/2024.
+class NightReportAdapter(ApiAdapter):
+ service = "nightreport"
+ endpoints = ['reports']
+ primary_endpoint = 'reports'
+
class NarrativelogAdapter(ApiAdapter):
+ """TODO full documentation
+ """
service = 'narrativelog'
+ endpoints = ['messages',]
+
primary_endpoint = 'messages'
fields = {'category',
'components',
@@ -162,7 +198,8 @@ def get_messages(self,
is_human='either',
is_valid='either',
offset=None,
- limit=None
+ limit=None,
+ outfields=None,
):
qparams = dict(is_human=is_human, is_valid=is_valid)
if site_ids:
@@ -181,13 +218,14 @@ def get_messages(self,
try:
recs = requests.get(url, timeout=self.timeout).json()
except Exception as err:
- warnings.warn(f'No {self.service} records retrieved: {err}')
+ warn(f'No {self.service} records retrieved: {err}')
recs = []
if len(recs) == 0:
raise Exception(f'No records retrieved from {url}')
+ if recs:
+ recs.sort(key=lambda r: r['date_begin'])
self.recs = recs
- self.recs.sort(key=lambda r: r['date_begin'])
return recs
def get_timelost(self, rollup='day'):
@@ -200,7 +238,19 @@ def get_timelost(self, rollup='day'):
return day_tl
class ExposurelogAdapter(ApiAdapter):
+ """TODO full documentation
+
+ EXAMPLES:
+       gaps = ExposurelogAdapter(server_url='https://usdf-rsp-dev.slac.stanford.edu').get_observation_gaps(['LSSTComCam'])
+       gaps = ExposurelogAdapter(server_url='https://tucson-teststand.lsst.codes').get_observation_gaps(['LSSTComCam'])
+ """
+ ignore_fields = ['id']
service = 'exposurelog'
+ endpoints = [
+ 'instruments',
+ 'exposures',
+ 'messages',
+ ]
primary_endpoint = 'messages'
fields = {'date_added',
'date_invalidated',
@@ -251,13 +301,30 @@ class ExposurelogAdapter(ApiAdapter):
}
+ def check_endpoints(self, timeout=None):
+ to = (timeout or self.timeout)
+        print(f'Try connecting to each endpoint of {self.server}/{self.service} '
+ f'using timeout={to}.')
+ url_http_status_code = dict()
+
+ for ep in self.endpoints:
+ qstr = '?instrument=na' if ep == 'exposures' else ''
+ url = f'{self.server}/{self.service}/{ep}{qstr}'
+ try:
+ r = requests.get(url, timeout=to)
+            except Exception:
+ url_http_status_code[url] = 'timeout'
+ else:
+ url_http_status_code[url] = r.status_code
+ return url_http_status_code
+
def get_instruments(self):
url = f'{self.server}/{self.service}/instruments'
try:
instruments = requests.get(url, timeout=self.timeout).json()
except Exception as err:
- warnings.warn(f'No instruments retrieved: {err}')
+ warn(f'No instruments retrieved: {err}')
instruments = dict(dummy=[])
# Flatten the lists
return list(itertools.chain.from_iterable(instruments.values()))
@@ -268,7 +335,7 @@ def get_exposures(self, instrument, registry=1):
try:
recs = requests.get(url, timeout=self.timeout).json()
except Exception as err:
- warnings.warn(f'No exposures retrieved: {err}')
+ warn(f'No exposures retrieved: {err}')
recs = []
return recs
@@ -283,7 +350,7 @@ def get_messages(self,
is_valid='either',
exposure_flags=None,
offset=None,
- limit=None
+ limit=None,
):
qparams = dict(is_human=is_human, is_valid=is_valid)
if site_ids:
@@ -295,7 +362,7 @@ def get_messages(self,
if min_day_obs:
qparams['min_day_obs'] = min_day_obs
if max_day_obs:
- qparams['max_day_obs'] = max_day_obs
+ qparams['max_day_obs'] = max_day_obs
if exposure_flags:
qparams['exposure_flags'] = exposure_flags
if offset:
@@ -305,16 +372,19 @@ def get_messages(self,
qstr = urlencode(qparams)
url = f'{self.server}/{self.service}/messages?{qstr}'
+ recs = []
try:
- recs = requests.get(url, timeout=self.timeout).json()
+ response = requests.get(url, timeout=self.timeout)
+ recs = response.json()
except Exception as err:
-            warnings.warn(f'No {self.service} records retrieved: {err}')
+            warn(f'No {self.service} records retrieved: {err}')
- recs = []
+
if len(recs) == 0:
- raise Exception(f'No records retrieved from {url}')
+ warn(f'No records retrieved from {url}')
+ if recs:
+ recs.sort(key=lambda r: r['day_obs'])
self.recs = recs
- self.recs.sort(key=lambda r: r['day_obs'])
return recs
def get_observation_gaps(self, instruments=None,
@@ -361,6 +431,44 @@ def get_observation_gaps(self, instruments=None,
-# gaps,recs = logrep_utils.ExposurelogAdapter(server_url='https://usdf-rsp-dev.slac.stanford.edu').get_observation_gaps('LSSTComCam')
-# gaps,recs = logrep_utils.ExposurelogAdapter(server_url='[[https://tucson-teststand.lsst.codes').get_observation_gaps('LSSTComCam')
+class Dashboard: # TODO Complete and move to its own file.
+ """Verify that we can get to all the API endpoints and databases we need for
+ any of our sources.
+ """
+
+ envs = dict(
+ summit = 'https://summit-lsp.lsst.codes',
+ usdf_dev = 'https://usdf-rsp-dev.slac.stanford.edu',
+ tucson = 'https://tucson-teststand.lsst.codes',
+ # Environments not currently used:
+ # rubin_usdf_dev = '',
+ # data_lsst_cloud = '',
+ # usdf = '',
+ # base_data_facility = '',
+ # rubin_idf_int = '',
+ )
+ adapters = [ExposurelogAdapter,
+ NarrativelogAdapter,
+ # NightReportAdapter, # TODO
+ ]
+
+ def report(self, timeout=None):
+ url_status = dict()
+ for env,server in self.envs.items():
+ for adapter in self.adapters:
+ service = adapter(server_url=server)
+ # url_status[endpoint_url] = http_status_code
+ url_status.update(service.check_endpoints(timeout=timeout))
+ total_cnt = 0
+ good_cnt = 0
+ good = list()
+ print('\nStatus for each endpoint URL:')
+ for url,stat in url_status.items():
+ print(f'{stat}\t{url}')
+ total_cnt += 1
+ if stat == 200:
+ good_cnt += 1
+ good.append(url)
+ print(f'\nConnected to {good_cnt} out of {total_cnt} endpoints.')
+ return good_cnt, good
diff --git a/python/lsst/ts/logging_and_reporting/utils.py b/python/lsst/ts/logging_and_reporting/utils.py
new file mode 100644
index 0000000..c6801c8
--- /dev/null
+++ b/python/lsst/ts/logging_and_reporting/utils.py
@@ -0,0 +1,68 @@
+# This file is part of ts_logging_and_reporting.
+#
+# Developed for Vera C. Rubin Observatory Telescope and Site Systems.
+# This product includes software developed by the LSST Project
+# (https://www.lsst.org).
+# See the COPYRIGHT file at the top-level directory of this distribution
+# for details of code ownership.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+import time
+
+
+def tic():
+ """Start timer.
+ """
+ tic.start = time.perf_counter()
+
+def toc():
+ """Stop timer.
+
+ Returns
+ -------
+ elapsed_seconds : float
+ Elapsed time in fractional seconds since the previous `tic()`.
+ """
+
+ elapsed_seconds = time.perf_counter() - tic.start
+ return elapsed_seconds # fractional
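+
+# Usage sketch for the module-level timer (single, non-overlapping timings):
+#   tic()
+#   ... do stuff ...
+#   seconds = toc()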
+
+class Timer():
+ """Elapsed seconds timer.
+
+ Multiple instances can be used simultaneously and can overlap.
+    Repeated use of `toc` without an intervening `tic` will yield increasingly
+    large elapsed times measured from the same starting point.
+
+ Example:
+ timer0 = Timer()
+ ...do stuff...
+ timer1 = Timer()
+ ...do stuff...
+ elapsed1 = timer1.toc # 10.1
+ ...do stuff...
+ elapsed1bigger = timer1.toc # 22.1
+ elapsed0 = timer0.toc # 50.0
+ """
+
+ def __init__(self):
+        self.tic  # reading the property starts (or restarts) the timer
+
+ @property
+ def tic(self):
+ self.start = time.perf_counter()
+ return self.start
+
+ @property
+ def toc(self):
+ elapsed_seconds = time.perf_counter() - self.start
+ return elapsed_seconds # fractional