diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 00000000..826b56de
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,2 @@
+[run]
+omit=venv/*
diff --git a/.github/actions/setup-nox/action.yml b/.github/actions/setup-nox/action.yml
deleted file mode 100644
index 69359dee..00000000
--- a/.github/actions/setup-nox/action.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-name: Setup Nox
-description: 'Prepares all python versions for nox'
-
-runs:
- using: composite
- steps:
- - uses: actions/setup-python@v3
- with:
- python-version: "pypy-3.7"
- - uses: actions/setup-python@v3
- with:
- python-version: "pypy-3.8"
- - uses: actions/setup-python@v3
- with:
- python-version: "pypy-3.9"
- - uses: actions/setup-python@v3
- with:
- python-version: "2.7"
- - uses: actions/setup-python@v3
- with:
- python-version: "3.5"
- - uses: actions/setup-python@v3
- with:
- python-version: "3.6"
- - uses: actions/setup-python@v3
- with:
- python-version: "3.7"
- - uses: actions/setup-python@v3
- with:
- python-version: "3.8"
- - uses: actions/setup-python@v3
- with:
- python-version: "3.9"
- - uses: actions/setup-python@v3
- with:
- python-version: "3.10"
- - name: "Install nox"
- run: pipx install nox
- shell: bash
diff --git a/.github/deadpendency.yaml b/.github/deadpendency.yaml
index 72aebf4a..03111d30 100644
--- a/.github/deadpendency.yaml
+++ b/.github/deadpendency.yaml
@@ -1,37 +1,10 @@
ignore-failures:
- python:
- - aioserial
- - getmac
- - backcall
- - commonmark
- - entrypoints
- - future
- - ipython-genutils
- - pickleshare
- - pyasn1
- - pyasn1-modules
- - webencodings
- - wcwidth
+ python:
+ - pyserial
additional-deps:
python:
# name can be included so Deadpendency can load the package details in the registry
- - name: aiofiles
- repo: Tinche/aiofiles
-
- name: asyncua
repo: FreeOpcUa/opcua-asyncio
-
- - name: ipywidgets
- repo: jupyter-widgets/ipywidgets
-
- - name: lmfit
- repo: lmfit/lmfit-py
-
- name: lxml
repo: lxml/lxml
-
- - name: nmrglue
- repo: jjhelmus/nmrglue
-
- - name: opcua
- repo: FreeOpcUa/python-opcua
diff --git a/.github/workflows/publish_pypi.yml b/.github/workflows/publish_pypi.yml
index 39630b28..8a2a0497 100644
--- a/.github/workflows/publish_pypi.yml
+++ b/.github/workflows/publish_pypi.yml
@@ -11,26 +11,29 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- - name: Set up Python 3.9
- uses: actions/setup-python@v1
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v4
with:
- python-version: 3.9
+ python-version: '3.10'
- name: Install pypa/build
run: |
python -m pip install build --user
+
- name: Build a binary wheel and a source tarball
run: |
python -m build --sdist --wheel --outdir dist/ .
+
- name: Publish distribution 📦 to Test PyPI
- uses: pypa/gh-action-pypi-publish@master
+ uses: pypa/gh-action-pypi-publish@release/v1
with:
user: dcambie
password: ${{ secrets.TEST_PYPI_API_TOKEN }}
repository_url: https://test.pypi.org/legacy/
skip_existing: true
+
- name: Publish distribution 📦 to PyPI
- if: startsWith(github.ref, 'refs/tags/v')
- uses: pypa/gh-action-pypi-publish@master
+ if: startsWith(github.ref_name, 'v')
+ uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_API_TOKEN }}
@@ -40,9 +43,9 @@ jobs:
steps:
- uses: actions/checkout@master
- name: Publish release 📦 to GitHub
- if: startsWith(github.ref, 'refs/tags/v')
+ if: startsWith(github.ref_name, 'v')
uses: elgohr/Github-Release-Action@master
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }}
with:
- args: AutomatedRelease
+ title: flowchem ${{ github.ref_name }}
diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml
index 21f27e7a..df147ce4 100644
--- a/.github/workflows/python-app.yml
+++ b/.github/workflows/python-app.yml
@@ -12,19 +12,25 @@ on:
jobs:
build:
runs-on: ${{ matrix.operating-system }}
+ timeout-minutes: 30
strategy:
matrix:
operating-system:
- ubuntu-latest
- - windows-latest
- - macOS-latest
+# - windows-latest
+# - macOS-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- - name: Setup nox
- uses: ./.github/actions/setup-nox
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.11'
- - name: Run nox
- run: nox
+ - name: Install flowchem
+ run: python -m pip install .[dev]
+
+ - name: Run tox
+ run: tox
diff --git a/.gitignore b/.gitignore
index e8a71ad5..d64973dc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -127,4 +127,5 @@ dmypy.json
# Pyre type checker
.pyre/
-/flowchem/components/devices/Vapourtec/commands.py
+/flowchem/devices/Vapourtec/commands.py
+/src/flowchem/devices/knauer_hplc_nda.py
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 453afcb4..92628b1d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,17 +1,44 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
-fail_fast: true
+fail_fast: false
repos:
- repo: https://github.com/psf/black
- rev: 22.1.0
+ rev: 22.8.0
hooks:
- id: black
language_version: python3
+
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v3.2.0
+ rev: v4.3.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
+
+- repo: https://github.com/asottile/reorder_python_imports
+ rev: v3.8.2
+ hooks:
+ - id: reorder-python-imports
+ args: ["--py39-plus", "--application-directories=.:src"]
+
+- repo: https://github.com/asottile/pyupgrade
+ rev: v2.38.0
+ hooks:
+ - id: pyupgrade
+ args: [--py310-plus]
+
+#- repo: https://gitlab.com/pycqa/flake8
+# rev: 5.0.4
+# hooks:
+# - id: flake8
+# additional_dependencies: [
+# 'flake8-bugbear',
+## 'flake8-comprehensions',
+# 'flake8-deprecated',
+## 'flake8-docstrings',
+# 'flake8-pep3101',
+# 'flake8-string-format',
+# ]
+# args: ['--count', '--extend-ignore=E501,E203', '--show-source', '--statistics']
diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 00000000..1f3750a0
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,37 @@
+# .readthedocs.yaml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the version of Python and other tools you might need
+build:
+ os: ubuntu-20.04
+ tools:
+ python: "3.10"
+ # You can also specify other tool versions:
+ # nodejs: "16"
+ # rust: "1.55"
+ # golang: "1.17"
+
+# Build documentation in the docs/ directory with Sphinx
+sphinx:
+ configuration: docs/conf.py
+
+# If using Sphinx, optionally build your docs in additional formats such as PDF
+# formats:
+# - pdf
+
+# Optionally declare the Python requirements required to build your documentation
+#python:
+# install:
+# - requirements: docs/requirements.txt
+
+
+python:
+ install:
+ - method: pip
+ path: .
+ extra_requirements:
+ - docs
diff --git a/CODE_of_CONDUCT.md b/CODE_of_CONDUCT.md
new file mode 100644
index 00000000..35b51670
--- /dev/null
+++ b/CODE_of_CONDUCT.md
@@ -0,0 +1,132 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the overall
+ community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or advances of
+ any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email address,
+ without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+[INSERT CONTACT METHOD].
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series of
+actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or permanent
+ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
+[https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
diff --git a/LICENSE b/LICENSE
index f0f732b1..449cea48 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright 2021
+Copyright 2022
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 6d279030..00000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,3 +0,0 @@
-graft flowchem
-global-exclude __pycache__
-global-exclude *.py[cod]
diff --git a/README.md b/README.md
index 01a62629..62da763c 100644
--- a/README.md
+++ b/README.md
@@ -4,37 +4,46 @@ Welcome to flowchem
![github-actions](https://github.com/cambiegroup/flowchem/actions/workflows/python-app.yml/badge.svg)
[![PyPI version fury.io](https://badge.fury.io/py/flowchem.svg)](https://pypi.org/project/flowchem/)
+[![Documentation Status](https://readthedocs.org/projects/flowchem/badge/?version=latest)](https://flowchem.readthedocs.io/en/latest/?badge=latest)
[![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](http://makeapullrequest.com)
[![MIT license](https://img.shields.io/badge/License-MIT-blue.svg)](https://lbesson.mit-license.org/)
[![DOI](https://zenodo.org/badge/300656785.svg)](https://zenodo.org/badge/latestdoi/300656785)
+[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_of_CONDUCT.md)
-Flowchem is a python library to automated flow chemistry experiments.
+Flowchem is a python library to control a variety of instruments commonly found in chemistry labs.
-Currently, the following instruments can be controlled via flowchem:
+### Overview
+Using flowchem is simple. You only need to
+1. **Create a configuration file** with the connection parameters for the devices you want to control (see the
+[User Guide](https://flowchem.readthedocs.io/en/latest/user_guide.html) for details).
+2. **Run `flowchem my_device_config_file.toml`** with the name of your configuration file
+3. **Done**!
+A web server will be created serving a RESTful API endpoint for each device, directly
+usable in browser or programmatically.
+
+### Supported devices
+Currently, the following instruments are supported, but we are open to contributions and the list keeps expanding!
- Pumps (Knauer P2.1, Harvard Apparatus Elite 11, Hamilton ML600)
- Valves (ViciValco and Knauer)
- Thermostat (Huber)
- Analytical instruments (Magritek Spinsolve benchtop NMR and Mattler Toledo FlowIR)
- General purpose sensors-actuators from Phidgets (e.g. 4...20 mA sensor to interface with Swagelok pressure sensors)
-
-## Table of Contents
-* [Installation](#installation)
-* [Usage](#usage)
-* [License](#license)
-* [Questions](#questions)
-
+ - ... [add support for a new device](https://flowchem.readthedocs.io/en/latest/add_new_device_type.html)!
## Installation
-```bash
-pip install flowchem
+Python 3.10 is needed, and it is suggested to install flowchem with pipx.
+You can install pipx and flowchem as follows:
+```shell
+pip install pipx
+pipx ensurepath
+pipx install flowchem
```
-
-## Usage
-{WRITE ME}
+## Documentation
+You can find the documentation online on [flowchem.readthedocs.io](https://flowchem.readthedocs.io/en/latest/).
## License
This project is released under the terms of the MIT License.
## Questions
-For questions about this project, fell free to open a GitHub issue, or reach out by email at dario.cambie@mpikg.mpg.de.
+For questions about this project, feel free to open a GitHub issue, or reach out [by email](mailto:2422614+dcambie@users.noreply.github.com).
diff --git a/docs/Elite11.md b/docs/Elite11.md
deleted file mode 100644
index d0d26e87..00000000
--- a/docs/Elite11.md
+++ /dev/null
@@ -1,89 +0,0 @@
-Harvard Apparatus Elite 11
-==========================
-
-Flowchem implements the Protocol11 syntax to communicate with Elite11 pumps.
-
-## Connection
-The USB type-B port on the back of the pump, once connected to a PC, creates a virtual serial port (drivers are
-auto-installed on Windows and not needed on Linux).
-
-To identify the serial port to which the pump is connected to you can use the utility function `elite11_finder()` as
-follows:
-
-```pycon
->>> from flowchem.devices.Harvard_Apparatus.Elite11_finder import elite11_finder
->>> elite11_finder()
-Looking for pump on COM3...
-Looking for pump on COM4...
-Found a pump with address 06 on COM4!
-Out[5]: {'COM4'}
-
-```
-
- .. note::
-Multiple pumps can be daisy chained on the same serial port provided they all have different address and that the pump
-connected to the PC has address 0. See manufacturer documentation for more info.
-
-### Model type
-Note that there are two models of Elite11, an "infuse only" and an "infuse and withdraw" pump.
-If you only need infuse capabilities just use:
-```python
-from flowchem import Elite11InfuseOnly
-```
-this will work with both pump models.
-On the other hand, if you need withdraw commands you need:
-```python
-from flowchem import Elite11InfuseWithdraw
-```
-whose `initialize()` method will take care of ensuring that the pump supports withdrawing.
-
-The constructor and all the methods are the same for both `Elite11` pumps, with the exception of the withdrawing commands being
-only available in `Elite11InfuseWithdraw`.
-
-
-## Test Connection
-Now that you know the serial port your pump is connected to, and the model of your pump, you can instantiate it and test the connection.
-```python
-from flowchem import HarvardApparatusPumpIO, Elite11InfuseWithdraw
-pumpio = HarvardApparatusPumpIO(port='COM4')
-pump1 = Elite11InfuseWithdraw(pump_io=pumpio, diameter=10.2, syringe_volume=10, address=0)
-pump2 = Elite11InfuseWithdraw(pump_io=pumpio, diameter=10.2, syringe_volume=10, address=1)
-
-```
-Alternatively, the `from_config()` classmethod can be used to instantiate the pump without the need of creating an
-HarvardApparatusPumpIO object (will be done automatically and shared across pumps on the same serial port).
-```python
-from flowchem import Elite11InfuseWithdraw
-pump = Elite11InfuseWithdraw.from_config(port="COM4", address=0, diameter="14.5 mm", syringe_volume="10 ml", name="acetone")
-# Note that the constructor above is equivalent to the following
-pump_config = {
- 'port': 'COM4',
- 'address': 0,
- 'name': "acetone",
- 'diameter': "14.6 mm",
- 'syringe_volume': "10 ml"
-}
-pump = Elite11InfuseWithdraw.from_config(**pump_config)
-# ... which is what is actually used when a device configuration is provided in yaml format e.g. via graph file.
-```
-
-## Initialization
-The first step after the creation of the pump object is the initialization, via the `initialize()` method, e.g.:
-```python
-await pump.initialize()
-```
-Note that the `initialize()` method returns a coroutine, so it must be called with `await` in order to wait for the pump to be ready.
-If you are not familiar with asynchronous syntax in python you can just call it with `asyncio.run()`.
-```python
-import asyncio
-asyncio.run(pump.initialize())
-```
-The initialization is needed to find the pump address if non was provided (the autodetection only works if a single pump is
-present on the serial port provided), to set the syringe volume and diameter and to ensure that the pump supports
-withdrawing moves if it has been initialized as `Elite11InfuseWithdraw`.
-
-## Usage
-Once you've initialized the pump, you can use all the methods it exposes. See FIXME:add sphinx autodoc link for the API reference.
-
-## API docs
-Autogenerate this
diff --git a/docs/IDEAS.md b/docs/IDEAS.md
deleted file mode 100644
index 119e3c09..00000000
--- a/docs/IDEAS.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# Ideas and notes
-
-### Injection loops
-Treat loop as a a kind of sequential syringe pump. Can look up attached devices. need loading and dispensing capability. Both entails valve switching and activating one of 2 pumps.
diff --git a/docs/ML600.md b/docs/ML600.md
deleted file mode 100644
index b8787203..00000000
--- a/docs/ML600.md
+++ /dev/null
@@ -1,25 +0,0 @@
-Hamilton ML600
-==============
-
-Flowchem implements the Protocol1/RNO+ syntax to communicate with ML-600 pumps.
-
-## Table of Contents
-* [Connection](#connection)
-* [Example](#example)
-* [API docs](#API docs)
-
-## Connection
-Serial communication (RS-232) with the pump takes place over a standard serial cable (DB-9 male/female connector).
-To identify the serial port to which the pump is connected to you can use the utility script `ML600_finder.py` as follows:
-
-
-Follow the manufacturer instruction
-
-## Example
-lalla
-
-## MultiPump
-
-
-## API docs
-Autogenerate this
diff --git a/docs/Principles.md b/docs/Principles.md
deleted file mode 100644
index 11f20950..00000000
--- a/docs/Principles.md
+++ /dev/null
@@ -1,22 +0,0 @@
-Principles:
-- No-code platform: device settings via YAML file and method calls via OpenAPI (directly usable in the browser).
-- only specify settings in a config file, `pipex` the module and use the OpenAPI (e.g. testing via web-broswer).
-- cross-platform HTTP-based API interface for interoperability. (This circumvents the issues with python dependencies and versioning conflicts and allows us to use modern python.)
-- it should still be possible to interact with the device object directly, i.e. without the HTTP interface, for power-users.
-
-Implementation design:
-- The end user should not need any knowledge of any implementation detail. Underlying complexity has to be handled internally and hidden to the user.
-- Device objects should only raise Exceptions upon instantiation.
- - the connection to the device is implicit in the object instantiation
- - raising warning is the preferred way to signal errors during execution as it allows the control code to continue w.g. with cleanup
- - communication streams are passed to the device constructors (i.e. dependency injection). This simplifies testing.
-- Each device module should be independent. Code sharing is possible via `flowchem.analysis` (or `flowchem.utils` et simil.)
-- Each device module should be accompanied by tests and documentation (at least in form of examples).
-- Device objects should use generic `flowchem.exceptions` or sublcasses thereof.
-
-
-Inspired by many packages with similar aims, including (in alphabetical order):
-- [Chemios](https://github.com/Chemios/chemios)
-- [ChemOS](https://github.com/aspuru-guzik-group/ChemOS)
-- [MechWolf](https://github.com/MechWolf/MechWolf)
-- [Octopus](https://github.com/richardingham/octopus)
diff --git a/docs/conf.py b/docs/conf.py
index 1e33c3a0..9d35382a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -3,26 +3,30 @@
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
-
# -- Path setup --------------------------------------------------------------
-
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
+import datetime
+import os
+import sys
+from importlib import metadata
+sys.path.insert(0, os.path.abspath("../src"))
+print(sys.path)
-# -- Project information -----------------------------------------------------
+CONF_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
+ROOT_DIR = os.path.abspath(os.path.join(CONF_DIR, os.pardir))
-project = "Flowchem"
-copyright = "2021, Dario Cambie, Jakob Wolf"
-author = "Dario Cambie, Jakob Wolf"
-# The full version, including alpha/beta/rc tags
-release = "0.0.1"
+# -- Project information -----------------------------------------------------
+
+# Extract from
+project = "flowchem"
+YEAR = datetime.date.today().strftime("%Y")
+author = "Dario Cambié"
+copyright = f"{YEAR}, {author}"
+release = metadata.version("flowchem")
# -- General configuration ---------------------------------------------------
@@ -30,7 +34,31 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = []
+extensions = [
+ "myst_parser",
+ "sphinx.ext.autodoc",
+ "sphinx.ext.napoleon",
+ "sphinxcontrib.openapi",
+ "sphinxcontrib.httpdomain",
+]
+
+source_suffix = [".rst", ".md"]
+autodoc_member_order = "bysource"
+
+myst_enable_extensions = [
+ "amsmath",
+ "colon_fence",
+ "deflist",
+ "dollarmath",
+ "fieldlist",
+ "html_admonition",
+ "html_image",
+ "replacements",
+ "smartquotes",
+ "strikethrough",
+ "substitution",
+ "tasklist",
+]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
@@ -46,9 +74,10 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = "alabaster"
+html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
+html_static_path = []
+# html_static_path = ["_static"]
diff --git a/docs/contributing/add_device/add_as_plugin.md b/docs/contributing/add_device/add_as_plugin.md
new file mode 100644
index 00000000..c4ee3440
--- /dev/null
+++ b/docs/contributing/add_device/add_as_plugin.md
@@ -0,0 +1 @@
+# Add new device as external plugin
diff --git a/docs/contributing/add_device/add_to_flowchem.md b/docs/contributing/add_device/add_to_flowchem.md
new file mode 100644
index 00000000..25bd5237
--- /dev/null
+++ b/docs/contributing/add_device/add_to_flowchem.md
@@ -0,0 +1,67 @@
+# Add new device to flowchem
+
+## Code
+In the `flowchem.device` subpackage, the device modules are organized in folders by manufacturer.
+Since this is the first device from _Weasley & Weasley_ in flowchem, we need to create a new folder under
+`/flowchem/devices`. Let's call it `/flowchem/devices/Weasley` to avoid the use of special characters ;)
+
+In this folder we will write a _module_ (i.e. a python file 🐍) called `ExtendableEar.py` to control our magic device.
+We create it piece by piece, but the content of the module will look like this:
+
+```python
+from flowchem.devices.flowchem_device import FlowchemDevice
+
+
+class ExtendeableEar(FlowchemDevice):
+ """Our virtual Extendable Ear!"""
+
+ def __init__(self):
+ ...
+
+```
+
+
+
+```python
+from flowchem.devices.flowchem_device import FlowchemDevice
+
+
+class ExtendeableEar(FlowchemDevice):
+ """Our virtual Extendable Ear!"""
+
+ def __init__(self):
+ ...
+
+ async def initialize(self):
+ ...
+
+ async def deploy(self):
+ ...
+
+ async def listen_for(self, seconds: str):
+ ...
+
+ async def retract(self):
+ ...
+
+
+
+```
+- instantiable from dict or if not possible with from_config @classmethod
+
+Finally, to register your new device type to the flowchem
+add to `__init__.py` to ensure it is available for import with a statement like
+```python
+from flowchem.devices import ExtendableEar
+```
+
+
+## Documentation
+Write a brief description of the class you created in the /docs/devices/ folder, following the same manufacturer-base hierarchy.
+Ideally manufacturer communication manual is added to the docs
+
+:::{note}
+This is an implementation detail that users do not have to care about since the `flowchem.device` submodules will hide
+this nested layer via their `__init__.py`. If you do not understand what this means it is not important, and you can
+safely skip this note if you follow this guide.
+:::
diff --git a/docs/contributing/add_device/index.md b/docs/contributing/add_device/index.md
new file mode 100644
index 00000000..1f884ae4
--- /dev/null
+++ b/docs/contributing/add_device/index.md
@@ -0,0 +1,26 @@
+# Add support for new devices
+
+If you want to add support for a new device-type this is the page for you!
+Let's assume you got a new lab device, an _✨Extendable Ear✨_ manufactured by _Weasley & Weasley_.
+And of course you want to control it via flowchem. Solid idea!👏
+
+You have two possibilities:
+* add support directly into flowchem (fork the repo, add device-specific code and
+create a pull request)
+* add support via a plugin (e.g. a `flowchem-extendable-ear` package)
+
+In general, devices whose support needs the addition of new dependencies to flowchem are better packaged as plugins,
+while generally useful modules are ideally embedded with flowchem.
+This is to limit the number of dependencies in `flowchem` while enabling support to devices with more complex needs.
+
+For example, a device only needing serial communication such as a syringe pump is ideal for native support, while the
+interface to Spinsolve NMR, which needs an external library for XML parsing, is provided as a plugin.
+
+```{toctree}
+:maxdepth: 2
+:caption: Add device to flowchem
+
+add_to_flowchem
+add_as_plugin
+
+```
diff --git a/docs/contributing/community.md b/docs/contributing/community.md
new file mode 100644
index 00000000..f0e6b087
--- /dev/null
+++ b/docs/contributing/community.md
@@ -0,0 +1,6 @@
+# Community
+
+We aim at creating a community around flowchem, by incentivizing participation from a diverse group of
+[contributors](./index.md).
+Please read the Contributor Covenant we adopted as Code of Conduct for guidance on how to interact with others in a way
+that makes the community thrive
diff --git a/docs/contributing/design_principles.md b/docs/contributing/design_principles.md
new file mode 100644
index 00000000..29aa7bef
--- /dev/null
+++ b/docs/contributing/design_principles.md
@@ -0,0 +1,74 @@
+# Software architecture
+
+## General
+### In which order are devices initialized
+When `flowchem` is called with a configuration file via CLI, the following happens:
+1. The configuration is parsed, and all the hardware device objects are created in the order they appear in the file.
+2. Communication is established for all the hardware devices via the `async` method `initialize()`.
+3. The components of each hardware object are collected, their routes added to the API server and advertised via mDNS.
+4. Flowchem is ready to be used.
+
+It follows that:
+* All the code in components can assume that a valid connection to the hw device is already in place.
+* Components can use introspection e.g. to determine if a pump has withdrawing capabilities or not.
+
+### Why no device orchestration functionalities are included?
+Lab automation holds great potential, yet research labs rarely reuse existing code.
+One reason for this is the lack of modularity in the existing lab-automation solutions. While a monolithic approach is faster to implement, it lacks flexibility.
+
+We designed flowchem to be a solid foundation for other modules to be based on.
+
+We try to follow the unix philosophy: do one thing, and do it well. Flowchem provides uniform API endpoints for the heterogeneous environment of lab devices.
+
+### Why Python 3.10?
+The recommended use of flowchem is to run it as a standalone app to provide homogeneous REST API access to the variegated landscape of lab devices. The direct import of device objects is highly discouraged.
+This allows us to use a recent version of Python and to exploit all the newly introduced features.
+For example, in the codebase are used the walrus operator (`:=`) and `importlib.metadata` introduced in 3.8, the dict merge with OR operator introduced in 3.9 and the type hints unions with the OR operator introduced in 3.10. We are looking forward to the inclusion of `tomllib` in the stdlib for 3.11 to drop the external dependency on `tomli`.
+
+### Why FastAPI?
+To create the API endpoints we use fastAPI mainly for its simplicity and for the ability to automatically generate openAPI specs from the type hints.
+The async aspects were particularly appealing since the communication with lab devices can take relatively long time (especially on slow protocols such as serial communication at 9600 baud) thus impacting the responsiveness of the API even at low requests/second.
+
+### Why Pint?
+Different devices use different units for the same quantities. For example, among pumps, Knauer HPLC pumps use ul/min as base unit, Harvard Apparatus syringe pumps can be set with different units from the nl/h to the ml/s while the Hamilton ML600 pumps have a custom steps-per-stroke parameter that controls the flow rate.
+Moreover, to offer a uniform experience and prevent errors, the same units should be used by the public API across different devices, yet the orders of magnitude involved are often experiment-specific.
+
+To solve all of these problems we decided to widely adopt [pint](https://pint.readthedocs.io/en/stable/) to represent any physical quantity. Particularly attractive was the possibility of serializing and de-serializing the quantities to strings with minor losses in precision. This matched our aim of enabling full configurability of device settings via a simple, text-based configuration file. For example, a syringe diameter can be intuitively specified as either "18.2 mm" or "1.82 cm".
+
+### Repository structure
+We follow the so-called "src-layout" i.e. with the source code in a `src` sub-folder. This layout, increasingly popular in the Python ecosystem, ensures that tox (among others) is using the built version of flowchem and not the local folder shadowing the same namespace. Read [this article](https://setuptools.pypa.io/en/latest/userguide/package_discovery.html#src-layout) for more details.
+
+### CLI application
+At its core, flowchem is a command line application that:
+1. parses a configuration file,
+2. connects to the lab devices described and
+3. offers access to them via a RESTful API.
+
+All of this could in theory be achieved without installing anything via pipx run, e.g.
+```shell
+pipx run flowchem my_device_config.toml
+```
+
+Principles:
+- No-code shim: device settings in a configuration file create OpenAPI endpoints with predictable names based on serial numbers.
+- Implements existing interoperable standard for lab IoT to avoid standard proliferation.
+
+Implementation design:
+- intended use via CLI endpoint, installed via `pipx`.
+- Ideally, failure in one device should not affect the others. (Catch-all error via starlette middleware SO 61596911)
+- to ease debug, add support for auto-reload if settings file is changed. (easy, need to trigger reload on changes, via watchfiles)
+- only connection specific settings are needed. Device-specific are optional on instantiation even if needed for use.
+  - For example, a syringe pump might need syringe diameter and volume before use, but those are device specific parameters and not connection specific, so they are not required in flowchem config.
+ - ideally all permanent device specific parameters (not changing during normal use) are received/set in a uniform way and advertised as such (to enabling dynamic graphs config via web interface, somehow similar to Magritek protocol options).
+- Don't force code-reuse, but allow for easy extension and leave device modules as independent as possible.
+- Each device module should be accompanied by tests and documentation/examples.
+- Following abstract device ontologies ease abstraction in higher level code.
+- Each device MUST have a name, unique per server, that will be the endpoint path. If none is provided, one will be generated.
+  - If a unique name can be programmatically derived after init (e.g. based on SERIAL_NUMBER), then that will also be advertised in the autodiscover name.
+ - This allows dependent libraries to use static names even though they are not yet known at flowchem init.
+
+Inspired by many packages with similar aims, including (in alphabetical order):
+- [Chemios](https://github.com/Chemios/chemios)
+- [ChemOS](https://github.com/aspuru-guzik-group/ChemOS)
+- [MechWolf](https://github.com/MechWolf/MechWolf)
+- [Octopus](https://github.com/richardingham/octopus)
diff --git a/docs/contributing/index.md b/docs/contributing/index.md
new file mode 100644
index 00000000..e0af5729
--- /dev/null
+++ b/docs/contributing/index.md
@@ -0,0 +1,112 @@
+# Contribute to flowchem
+% part of this page is based on the numpy project one
+% See also https://rdflib.readthedocs.io/en/stable/developers.html
+% And https://diataxis.fr/how-to-guides/
+
+Not a coder? Not a problem! Flowchem is multi-faceted, and we can use a lot of help.
+These are all activities we’d like to get help with:
+
+ Code maintenance and development
+
+ Developing educational content & narrative documentation
+
+ Writing technical documentation
+
+The rest of this document discusses working on the flowchem code base and documentation.
+
+## Development process
+1. If you are a first-time contributor:
+
+ * Go to [flowchem gitHub repository](https://github.com/cambiegroup/flowchem) and click the “fork” button to create your own copy of the project.
+
+ * Clone the project to your local computer:
+
+ * `git clone https://github.com/your-username/flowchem.git`
+
+ * Change the directory:
+
+ * cd flowchem
+
+ * Add the upstream repository:
+
+ * git remote add upstream https://github.com/cambiegroup/flowchem.git
+
+    * Now, `git remote -v` will show two remote repositories named:
+ * `upstream`, which refers to the `flowchem` repository
+ * `origin`, which refers to your personal fork
+
+2. Develop your contribution:
+
+ * Pull the latest changes from upstream:
+
+ * `git checkout main`
+ * `git pull upstream main`
+
+ * Create a branch for the feature you want to work on. Since the branch name will appear in the merge message, use a sensible name. For example, if you intend to add support for a new device type, called ExtendibleEar a good candidate could be ‘add-extendible-ear-support’:
+
+ * `git checkout -b add-extendible-ear-support`
+
+ * Commit locally as you progress (`git add` and `git commit`) Use a properly formatted commit message, write tests that fail before your change and pass afterward, run all the tests locally. Be sure to document any changed behavior in docstrings, keeping to the [Google docstring standard](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html).
+
+3. To submit your contribution:
+
+ * Push your changes back to your fork on GitHub:
+
+ * `git push origin add-extendible-ear-support`
+
+ * Enter your GitHub username and password (repeat contributors or advanced users can remove this step by connecting to GitHub with SSH).
+
+ * Go to GitHub. The new branch will show up with a green Pull Request button. Make sure the title and message are clear, concise, and self-explanatory. Then click the button to submit it.
+
+    * If your commit introduces a new feature or changes functionality, create an issue on the GitHub repo to explain your changes. For bug fixes, documentation updates, etc., this is generally not necessary, though if you do not get any reaction, do feel free to ask for review.
+
+4. Review process:
+
+    * Reviewers (the other developers and interested community members) will write inline and/or general comments on your Pull Request (PR) to help you improve its implementation, documentation and style. We aim at protecting the main branch from direct commits to ensure all changes are introduced via pull requests that can be reviewed. The review is meant as a friendly conversation from which we all learn and the overall code quality benefits. Please do not let the review discourage you from contributing: its only aim is to improve the quality of the project, not to criticize (we are, after all, very grateful for your contribution!).
+
+ * To update your PR, make your changes on your local repository, commit, run tests, and only if they succeed push to your fork. As soon as those changes are pushed up (to the same branch as before) the PR will update automatically. If you have no idea how to fix the test failures, you may push your changes anyway and ask for help in a PR comment.
+
+ * Various continuous integration (CI) services are triggered after each PR update to build the code, run unit tests, measure code coverage and check coding style of your branch. The CI tests must pass before your PR can be merged. If CI fails, you can find out why by clicking on the “failed” icon (red cross) and inspecting the build and test log. To speed up this cycle you can also test your work locally before committing.
+
+ * A PR which has been approved by at least one core team member will be merged in the main branch and will be part of the next release of flowchem.
+
+5. Document changes
+
+ * If your change introduces support for a new device make sure to add description for it in the docs and the README.
+
+6. Cross-referencing issues
+
+ * If the PR solves an issue, you can add the text closes xxxx, where xxxx is the number of the issue. Instead of closes you can use any of the other flavors [gitHub accepts](https://help.github.com/en/articles/closing-issues-using-keywords) such as fix and resolve.
+
+## Guidelines
+
+* All code should be documented with docstrings in Google format and comments where appropriate.
+* All code should have tests.
+* We use [black](https://github.com/psf/black) to avoid wasting time discussing code style details.
+* You can install [pre-commit](https://pre-commit.com/) to run black and other linters as part of the pre-commit hooks. See our `.pre-commit-config.yml` for details. The use of linter and import re-ordering is aimed at reducing diff size and merge conflicts in pull request.
+
+## Test coverage
+To run the tests `pytest` and some pytest plugins are needed. To install the testing-related dependencies for local testing run this command from the root folder:
+```shell
+pip install .[test]
+```
+
+## Building docs
+The docs are automatically built for each commit at [readthedocs](https://readthedocs.org/projects/flowchem/).
+To build them locally, sphinx, myst-parser and other packages are needed. To install the tools to build the docs run this command from the root folder:
+```shell
+pip install .[docs]
+```
+
+Then from the docs folder run `make html` to generate html docs in the build directory.
+
+
+```{toctree}
+:maxdepth: 2
+
+community
+design_principles
+add_device/index
+models/device_models
+
+```
diff --git a/docs/contributing/models/device_models.md b/docs/contributing/models/device_models.md
new file mode 100644
index 00000000..8443f909
--- /dev/null
+++ b/docs/contributing/models/device_models.md
@@ -0,0 +1,21 @@
+# Models
+
+Each device object must be subclass of `BaseDevice`, either directly or more likely via one of `BaseDevice` subclasses.
+This allows to add features to all device objects ensuring consistency and without repeating code
+(i.e. following the [DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle).
+For example, the attribute `owl_subclass_of`, defined in `BaseDevice`, allows to specify classes of the Web Ontology
+Language.
+
+The scheme below represents the current device object taxonomy.
+
+![Flowchem models taxonomy](../../images/flowchem_models.svg)
+
+
+```{toctree}
+:maxdepth: 2
+
+valves/base_valve
+valves/injection_valve
+valves/multiposition_valve
+
+```
diff --git a/docs/contributing/models/valves/base_valve.md b/docs/contributing/models/valves/base_valve.md
new file mode 100644
index 00000000..9cc61e3e
--- /dev/null
+++ b/docs/contributing/models/valves/base_valve.md
@@ -0,0 +1,9 @@
+# Base Valve
+
+```{eval-rst}
+.. autoclass:: flowchem.models.valves.base_valve.BaseValve
+ :show-inheritance:
+ :members:
+ :exclude-members: get_router, initialize
+ :special-members: __init__
+```
diff --git a/docs/contributing/models/valves/injection_valve.md b/docs/contributing/models/valves/injection_valve.md
new file mode 100644
index 00000000..a50b6b28
--- /dev/null
+++ b/docs/contributing/models/valves/injection_valve.md
@@ -0,0 +1,14 @@
+# Injection Valve
+
+The injection valve model represents any valve with two positions: `LOAD` and `INJECT`.
+
+The typical example is a 6-ports-2-positions valve commonly used for HPLC sample injection.
+Example devices are
+
+```{eval-rst}
+.. autoclass:: flowchem.models.valves.injection_valve.InjectionValve
+ :show-inheritance:
+ :members:
+ :exclude-members: get_router, initialize
+ :special-members: __init__
+```
diff --git a/docs/contributing/models/valves/multiposition_valve.md b/docs/contributing/models/valves/multiposition_valve.md
new file mode 100644
index 00000000..d99a0487
--- /dev/null
+++ b/docs/contributing/models/valves/multiposition_valve.md
@@ -0,0 +1,9 @@
+# Multi-position Valve
+
+```{eval-rst}
+.. autoclass:: flowchem.models.valves.multiposition_valve.MultiPositionValve
+ :show-inheritance:
+ :members:
+ :exclude-members: get_router, initialize
+ :special-members: __init__
+```
diff --git a/docs/devices/dataapex/api.rst b/docs/devices/dataapex/api.rst
new file mode 100644
index 00000000..aeb08929
--- /dev/null
+++ b/docs/devices/dataapex/api.rst
@@ -0,0 +1 @@
+.. openapi:: ./clarity.yml
diff --git a/docs/devices/dataapex/clarity.md b/docs/devices/dataapex/clarity.md
new file mode 100644
index 00000000..9a502935
--- /dev/null
+++ b/docs/devices/dataapex/clarity.md
@@ -0,0 +1,38 @@
+# DataApex Clarity (HPLC software)
+
+Clarity is a chromatography data software for data acquisition, processing, and instrument control that can be
+controlled via a command line interface (CLI) as described on the [manufacturer website](https://www.dataapex.com/documentation/Content/Help/110-technical-specifications/110.020-command-line-parameters/110.020-command-line-parameters.htm?Highlight=command%20line).
+
+In `flowchem` we provide a device type, named `Clarity`, to control local Clarity instances via HTTP with flowchem API.
+
+
+## Configuration
+Configuration sample showing all possible parameters:
+
+```toml
+[device.hplc] # This is the 'device' identifier
+type = "Clarity"
+
+# Optional parameters (defaults shown)
+executable = "C:\\claritychrom\\bin\\claritychrom.exe"
+instrument_number = 1 # Specify the instrument to be controlled (if the same Clarity instance has more than one)
+startup-time = 20 # Max time necessary to start-up Clarity and connect all the instruments specified in the configuration
+startup-method = "startup-method.met" # Method sent to the device upon startup.
+cmd_timeout = 3 # Max amount of time (in s) to wait for the execution of claritychrom.exe commands.
+user = "admin" # Default user name
+password = "" # Empty or option not present for no password
+clarity-cfg-file = "" # Configuration file for Clarity, if e.g. LaunchManager is used to save different configurations
+```
+
+## API methods
+Once configured, a flowchem Clarity object will expose the following commands:
+
+```{eval-rst}
+.. include:: api.rst
+```
+
+## Further information
+Only a few of the commands available through the Clarity CLI are exposed via flowchem.
+It is possible to add support for more commands if necessary, please refer to the
+[manufacturer website](https://www.dataapex.com/documentation/Content/Help/110-technical-specifications/110.020-command-line-parameters/110.020-command-line-parameters.htm?Highlight=command%20line)
+for a list of all the available options.
diff --git a/docs/devices/dataapex/clarity.yml b/docs/devices/dataapex/clarity.yml
new file mode 100644
index 00000000..995825a1
--- /dev/null
+++ b/docs/devices/dataapex/clarity.yml
@@ -0,0 +1,142 @@
+openapi: 3.0.2
+info:
+ title: Flowchem - devices
+ description: Flowchem is a python library to control a variety of instruments commonly
+ found in chemistry labs.
+ license:
+ name: MIT License
+ url: https://opensource.org/licenses/MIT
+ version: 0.1.0a3
+paths:
+ /hplc/run:
+ put:
+ tags:
+ - hplc
+ - hplc
+ summary: Run
+ description: 'Run one analysis on the instrument. The sample name has to be
+ set in advance via sample-name.
+
+
+ Note that it takes at least 2 sec until the run actually starts (depending
+ on instrument configuration).
+
+ While the export of the chromatogram in e.g. ASCII format can be achieved
+ programmatically via the CLI, the best
+
+ solution is to enable automatic data export for all runs of the HPLC as the
+ chromatogram will be automatically
+
+ exported as soon as the run is finished.'
+ operationId: run_hplc_run_put
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema: {}
+ /hplc/method:
+ put:
+ tags:
+ - hplc
+ - hplc
+ summary: Set Method
+ description: 'Sets the HPLC method (i.e. a file with .MET extension) to the
+ instrument.
+
+
+ Make sure to select ''Send Method to Instrument'' option in Method Sending
+ Options dialog in System Configuration.'
+ operationId: set_method_hplc_method_put
+ parameters:
+ - required: true
+ schema:
+ title: Method Name
+ type: string
+ name: method_name
+ in: query
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema: {}
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /hplc/sample-name:
+ put:
+ tags:
+ - hplc
+ - hplc
+ summary: Set Sample Name
+ description: Sets the name of the sample for the next run.
+ operationId: set_sample_name_hplc_sample_name_put
+ parameters:
+ - required: true
+ schema:
+ title: Sample Name
+ type: string
+ name: sample_name
+ in: query
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema: {}
+ '422':
+ description: Validation Error
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/HTTPValidationError'
+ /hplc/exit:
+ put:
+ tags:
+ - hplc
+ - hplc
+ summary: Exit
+ description: Exit Clarity Chrom.
+ operationId: exit_hplc_exit_put
+ responses:
+ '200':
+ description: Successful Response
+ content:
+ application/json:
+ schema: {}
+components:
+ schemas:
+ HTTPValidationError:
+ title: HTTPValidationError
+ type: object
+ properties:
+ detail:
+ title: Detail
+ type: array
+ items:
+ $ref: '#/components/schemas/ValidationError'
+ ValidationError:
+ title: ValidationError
+ required:
+ - loc
+ - msg
+ - type
+ type: object
+ properties:
+ loc:
+ title: Location
+ type: array
+ items:
+ anyOf:
+ - type: string
+ - type: integer
+ msg:
+ title: Message
+ type: string
+ type:
+ title: Error Type
+ type: string
diff --git a/flowchem/components/devices/Hamilton/Microlab-600-RS-232-Communication-Manual.pdf b/docs/devices/hamilton/Microlab-600-RS-232-Communication-Manual.pdf
similarity index 100%
rename from flowchem/components/devices/Hamilton/Microlab-600-RS-232-Communication-Manual.pdf
rename to docs/devices/hamilton/Microlab-600-RS-232-Communication-Manual.pdf
diff --git a/docs/devices/hamilton/api.rst b/docs/devices/hamilton/api.rst
new file mode 100644
index 00000000..2e3b5a90
--- /dev/null
+++ b/docs/devices/hamilton/api.rst
@@ -0,0 +1 @@
+.. openapi:: ./ml600.yml
diff --git a/docs/devices/hamilton/ml600.md b/docs/devices/hamilton/ml600.md
new file mode 100644
index 00000000..e801669a
--- /dev/null
+++ b/docs/devices/hamilton/ml600.md
@@ -0,0 +1,47 @@
+# Hamilton Syringe Pump ML600
+
+Hamilton ML600 pumps connected via serial (RS-232) cables are supported in flowchem via the `ML600` device type.
+As for all `flowchem` devices, the virtual instrument can be instantiated via a configuration file that generates an
+openAPI endpoint.
+For a standard (single syringe) pump model, two components will be available: one pump and one valve.
+Pumps with multiple syringes are not currently supported.
+
+
+## Configuration
+Configuration sample showing all possible parameters:
+
+```toml
+[device.my-ml600-pump] # This is the pump identifier
+type = "ML600"
+port = "COM1" # This will be /dev/tty* under linux/MacOS
+address= 1 # If multiple devices are daisy chained, number in the chain 1=first...
+syringe_volume = "1 ml" # If the wrong syringe volume is set, the flow rate will be wrong.
+```
+
+```{note} Serial connection parameters
+Note, further parameters for the serial connections (i.e. those accepted by `serial.Serial`) such as `baudrate`,
+`parity`, `stopbits` and `bytesize` can be specified.
+However, it should not be necessary as the following values (which are the default for the instrument) are
+automatically used:
+* baudrate 9600
+* parity even
+* stopbits 1
+* bytesize 7
+```
+
+## API methods
+Once configured, a flowchem ML600 object will expose the following commands:
+
+```{eval-rst}
+.. include:: api.rst
+```
+
+## Device detection
+Lab PCs often have several devices connected via serial ports.
+ML600 pumps can be auto-detected via the `flowchem-autodiscover` command-line utility.
+After having installed flowchem, run `flowchem-autodiscover` to create a configuration stub with all the devices that
+can be auto-detected on your PC.
+
+## Further information
+For further information about connection of the pump to the controlling PC, daisy-chaining via RJ-12 cables etc.
+please refer to the [manufacturer manual](./Microlab-600-RS-232-Communication-Manual.pdf).
diff --git a/docs/devices/harvardapparatus/api.rst b/docs/devices/harvardapparatus/api.rst
new file mode 100644
index 00000000..fd623508
--- /dev/null
+++ b/docs/devices/harvardapparatus/api.rst
@@ -0,0 +1 @@
+.. openapi:: ./elite11.yml
diff --git a/docs/devices/harvardapparatus/elite11.md b/docs/devices/harvardapparatus/elite11.md
new file mode 100644
index 00000000..ec88fa73
--- /dev/null
+++ b/docs/devices/harvardapparatus/elite11.md
@@ -0,0 +1,51 @@
+# Harvard Apparatus Syringe Pump Elite11
+
+## Introduction
+Harvard-Apparatus Elite11 pumps connected via USB cables (which creates a virtual serial port) are supported in flowchem
+via the `Elite11` device type.
+Depending on the pump model, the component might be capable of infusing/withdrawing or just infusing.
+This difference reflects the existence in commerce of both variants, i.e. pumps only capable of infusion and pumps that
+support both infusion and withdrawing commands.
+
+As for all `flowchem` devices, the virtual instrument can be instantiated via a configuration file that generates an
+openAPI endpoint.
+
+
+## Configuration
+Configuration sample showing all possible parameters:
+
+```toml
+[device.my-elite11-pump] # This is the pump identifier
+type = "Elite11"
+port = "COM11" # This will be /dev/tty* under linux/MacOS
+address = 0 # Only needed for daisy-chaining. The address can be set on the pump, see manufacturer manual.
+syringe_diameter = "4.6 mm"
+syringe_volume = "1 ml"
+baudrate = 115200 # Values between 9,600 and 115,200 can be selected on the pump! (115200 assumed if not specified)
+force = 100 # Value percent, use lower force for smaller syringes, see manual.
+```
+
+```{note} Serial connection parameters
+Note, further parameters for the serial connections (i.e. those accepted by `serial.Serial`) such as `baudrate`,
+`parity`, `stopbits` and `bytesize` can be specified.
+However, it should not be necessary as the following values (which are the default for the instrument) are
+automatically used:
+* baudrate 115200
+```
+
+## API methods
+Once configured, a flowchem Elite11 object will expose the following commands:
+
+```{eval-rst}
+.. include:: api.rst
+```
+
+## Device detection
+Lab PCs often have several devices connected via serial ports.
+Elite11 pumps can be auto-detected via the `flowchem-autodiscover` command-line utility.
+After having installed flowchem, run `flowchem-autodiscover` to create a configuration stub with all the devices that
+can be auto-detected on your PC.
+
+## Further information
+For further information about connection of the pump to the controlling PC, daisy-chaining via firmware cables etc.
+please refer to the [manufacturer manual](./elite11_manual.pdf).
diff --git a/flowchem/components/devices/Harvard_Apparatus/11 Elite & 11 Elite Pico Manual - Rev C.pdf b/docs/devices/harvardapparatus/elite11_manual.pdf
similarity index 100%
rename from flowchem/components/devices/Harvard_Apparatus/11 Elite & 11 Elite Pico Manual - Rev C.pdf
rename to docs/devices/harvardapparatus/elite11_manual.pdf
diff --git a/docs/devices/huber/api.rst b/docs/devices/huber/api.rst
new file mode 100644
index 00000000..fd623508
--- /dev/null
+++ b/docs/devices/huber/api.rst
@@ -0,0 +1 @@
+.. openapi:: ./chiller.yml
diff --git a/docs/devices/huber/chiller.md b/docs/devices/huber/chiller.md
new file mode 100644
index 00000000..66de9ae8
--- /dev/null
+++ b/docs/devices/huber/chiller.md
@@ -0,0 +1,58 @@
+# Huber Chiller
+## Introduction
+The majority of Huber chillers can be controlled via so-called `PB Commands` over serial communication.
+A variety of `PB Commands` are supported in `flowchem`, but some of them may be unavailable on specific models, see the
+[manufacturer documentation](./pb_commands_handbook.pdf) for more details.
+
+As for all `flowchem` devices, the virtual instrument can be instantiated via a configuration file that generates an
+openAPI endpoint.
+
+
+## Configuration
+Configuration sample showing all possible parameters:
+
+```toml
+[device.my-huber-chiller] # This is the chiller identifier
+type = "HuberChiller"
+port = "COM11" # This will be /dev/tty* under linux/MacOS
+min_temp = -100 # Min and max temp can be used to further limit the available temperatures
+max_temp = +250 # e.g. for compatibility with the reaction system.
+```
+
+```{note} Serial connection parameters
+Note, further parameters for the serial connections (i.e. those accepted by `serial.Serial`) such as `baudrate`,
+`parity`, `stopbits` and `bytesize` can be specified.
+However, it should not be necessary as the following values (which are the default for the instrument) are
+automatically used:
+* baudrate 9600 (with Com.G@te other baud rates are possible)
+* parity none
+* stopbits 1
+* bytesize 8
+```
+
+## API methods
+Once configured, a flowchem HuberChiller object will expose the following commands:
+
+```{eval-rst}
+.. include:: api.rst
+```
+
+## Device detection
+Lab PCs often have several devices connected via serial ports.
+Huber's chillers can be auto-detected via the `flowchem-autodiscover` command-line utility.
+After having installed flowchem, run `flowchem-autodiscover` to create a configuration stub with all the devices that
+can be auto-detected on your PC.
+
+## Further information
+For further information please refer to the [manufacturer manual](./pb_commands_handbook.pdf)
+
+```{note} Serial connection parameters
+Note, further parameters for the serial connections (i.e. those accepted by `serial.Serial`) such as `baudrate`,
+`parity`, `stopbits` and `bytesize` can be specified.
+However, it should not be necessary as the following values (which are the default for the instrument) are
+automatically used:
+* baudrate 9600 (with Com.G@te other baud rates are possible)
+* parity none
+* stopbits 1
+* bytesize 8
+```
diff --git a/flowchem/components/devices/Huber/Handbuch_Datenkommunikation_PB_en.pdf b/docs/devices/huber/pb_commands_handbook.pdf
similarity index 100%
rename from flowchem/components/devices/Huber/Handbuch_Datenkommunikation_PB_en.pdf
rename to docs/devices/huber/pb_commands_handbook.pdf
diff --git a/docs/devices/knauer/api-pump.rst b/docs/devices/knauer/api-pump.rst
new file mode 100644
index 00000000..dba23a36
--- /dev/null
+++ b/docs/devices/knauer/api-pump.rst
@@ -0,0 +1 @@
+.. openapi:: ./knauer-pump.yml
diff --git a/docs/devices/knauer/api-valve.rst b/docs/devices/knauer/api-valve.rst
new file mode 100644
index 00000000..7f5429b0
--- /dev/null
+++ b/docs/devices/knauer/api-valve.rst
@@ -0,0 +1 @@
+.. openapi:: ./knauer-valve.yml
diff --git a/docs/devices/knauer/azura_compact.md b/docs/devices/knauer/azura_compact.md
new file mode 100644
index 00000000..9f37c298
--- /dev/null
+++ b/docs/devices/knauer/azura_compact.md
@@ -0,0 +1,40 @@
+# Pump Azura Compact (P 2.1S)
+## Introduction
+The Knauer Azura Compact pumps can be controlled via flowchem.
+
+As for all `flowchem` devices, the virtual instrument can be instantiated via a configuration file that generates an
+openAPI endpoint.
+
+
+## Connection
+Knauer pumps are originally designed to be used with HPLC instruments, so they support ethernet communication.
+Moreover, they feature an autodiscover mechanism that makes it possible to automatically find the IP address
+of a device given its (immutable) MAC address.
+This enables the use of the pumps with dynamic addresses (i.e. with a DHCP server), which simplifies the setup procedure.
+
+## Configuration
+Configuration sample showing all possible parameters:
+
+```toml
+[device.my-knauer-pump] # This is the pump identifier
+type = "AzuraCompactPump"
+ip_address = "192.168.2.1" # Only one of either ip_address or mac_address needs to be provided
+mac_address = "00:11:22:33:44:55" # Only one of either ip_address or mac_address needs to be provided
+max_pressure = "10 bar" # Optionally, a string with natural language specifying max pressure can be provided
+min_pressure = "5 bar" # Optionally, a string with natural language specifying min pressure can be provided
+```
+
+## API methods
+Once configured, a flowchem AzuraCompactPump object will expose the following commands:
+
+```{eval-rst}
+.. include:: api-pump.rst
+```
+
+## Device detection
+Azura Compact pumps can be auto-detected via the `flowchem-autodiscover` command-line utility.
+After having installed flowchem, run `flowchem-autodiscover` to create a configuration stub with all the devices that
+can be auto-detected on your PC.
+
+## Further information
+For further information please refer to the [manufacturer manual](./pump_p2.1s_instructions.pdf)
diff --git a/flowchem/components/devices/Knauer/V6870 _P2.1S_P4.1S_Instructions.pdf b/docs/devices/knauer/pump_p2.1s_instructions.pdf
similarity index 100%
rename from flowchem/components/devices/Knauer/V6870 _P2.1S_P4.1S_Instructions.pdf
rename to docs/devices/knauer/pump_p2.1s_instructions.pdf
diff --git a/flowchem/components/devices/Knauer/V6855_AZURA_Valve_Unifier_VU_4.1_Instructions_EN.pdf b/docs/devices/knauer/valve_instructions_en.pdf
similarity index 100%
rename from flowchem/components/devices/Knauer/V6855_AZURA_Valve_Unifier_VU_4.1_Instructions_EN.pdf
rename to docs/devices/knauer/valve_instructions_en.pdf
diff --git a/docs/devices/knauer/valves.md b/docs/devices/knauer/valves.md
new file mode 100644
index 00000000..0163e8ea
--- /dev/null
+++ b/docs/devices/knauer/valves.md
@@ -0,0 +1,48 @@
+# Knauer Valves
+## Introduction
+A range of different valve heads can be mounted on the same Knauer actuator, so several type of valves can be controlled
+with the same protocol. Both standard 6-port-2-position injection valves and multi-position valves
+(with 6, 12 or 16 ports) can be controlled via flowchem.
+
+As for all `flowchem` devices, the virtual instrument can be instantiated via a configuration file that generates an
+openAPI endpoint.
+
+
+## Connection
+Knauer valves are originally designed to be used with HPLC instruments, so they support ethernet communication.
+Moreover, they feature an autodiscover mechanism that makes it possible to automatically find the IP address
+of a device given its (immutable) MAC address.
+This enables the use of the valves with dynamic addresses (i.e. with a DHCP server) which simplifies the setup procedure.
+
+
+## Configuration
+Configuration sample showing all possible parameters:
+
+```toml
+[device.my-knauer-valve] # This is the valve identifier
+type = "KnauerValve" # The actual valve type will be detected automatically
+ip_address = "192.168.2.1" # Only one of either ip_address or mac_address needs to be provided
+mac_address = "00:11:22:33:44:55" # Only one of either ip_address or mac_address needs to be provided
+```
+
+## API methods
+Once configured, a flowchem Knauer6Port2PositionValve object will expose the following commands:
+
+```{eval-rst}
+.. include:: api-pump.rst
+```
+
+## Valve positions
+The valve position naming follows the general convention of flowchem, depending on the valve type
+(see [Base Valve](../../models/valves/base_valve.md)):
+* Injection valves have positions named 'load' and 'inject'
+* Distribution valves have positions from '1' to 'n' where n is the total number of ports available.
+
+## Device detection
+Knauer Valves can be auto-detected via the `flowchem-autodiscover` command-line utility.
+After having installed flowchem, run `flowchem-autodiscover` to create a configuration stub with all the devices that
+can be auto-detected on your PC.
+
+
+## Further information
+For further information please refer to the [manufacturer manual](./valve_instructions_en.pdf)
diff --git a/docs/devices/magritek/api.rst b/docs/devices/magritek/api.rst
new file mode 100644
index 00000000..c78f38b3
--- /dev/null
+++ b/docs/devices/magritek/api.rst
@@ -0,0 +1 @@
+.. openapi:: ./spinsolve.yml
diff --git a/docs/devices/magritek/spinsolve.md b/docs/devices/magritek/spinsolve.md
new file mode 100644
index 00000000..c278de6d
--- /dev/null
+++ b/docs/devices/magritek/spinsolve.md
@@ -0,0 +1,48 @@
+# Magritek Spinsolve
+```{admonition} Additional plugin needed!
+:class: attention
+
+To use Spinsolve devices the external plugin `flowchem-spinsolve` is needed!
+
+Install it with `python -m pip install flowchem-spinsolve`!
+```
+
+## Introduction
+The bench-top NMRs from Magritek are controlled by the proprietary software Spinsolve.
+Spinsolve can be controlled remotely via XML over HTTP.
+
+As for all `flowchem` devices, a Spinsolve virtual instrument can be instantiated via a configuration file that generates an openAPI endpoint.
+A peculiarity of controlling the NMR in this way is that the FIDs acquired are stored on
+the computer where spinsolve is installed, which may or may not be the same PC where flowchem
+is running.
+Some utility functions are provided in case you are controlling Spinsolve on a different PC than the one running flowchem, see below for more details.
+
+
+## Configuration
+Configuration sample showing all possible parameters:
+
+```toml
+[device.my-benchtop-nmr] # This is the device identifier
+type = "Spinsolve"
+host = "127.0.0.1" # IP address of the PC running Spinsolve, 127.0.0.1 for local machine. Only necessary parameter.
+port = 13000 # Default spinsolve port
+sample_name = "automated-experiment"
+solvent = "chloroform-d"
+data_folder = "D:\\data2q\\my-experiment"
+remote_to_local_mapping = ["D:\\data2q", "\\BSMC-7WP43Y1\\data2q"]
+```
+
+## API methods
+Once configured, a flowchem Spinsolve object will expose the following commands:
+
+```{eval-rst}
+.. include:: api.rst
+```
+
+## Remote control
+When controlling a Spinsolve instance running on a remote PC, it is necessary that the FIDs are saved in a folder that
+is accessible from the PC running flowchem as the Spinsolve API does not natively allow for file transfer.
+If network drives are used, a location with the same name can be used on both PCs.
+If that is not the case, a `remote_to_local_mapping` parameter can be used to translate the remote file hierarchy to the
+local (flowchem-accessible) one.
+Incidentally, this enables file sharing across PCs with different operating systems, e.g. if flowchem is running on Linux.
diff --git a/docs/devices/manson/api.rst b/docs/devices/manson/api.rst
new file mode 100644
index 00000000..15bc5c88
--- /dev/null
+++ b/docs/devices/manson/api.rst
@@ -0,0 +1 @@
+.. openapi:: ./manson.yml
diff --git a/docs/devices/manson/manson.md b/docs/devices/manson/manson.md
new file mode 100644
index 00000000..5d825655
--- /dev/null
+++ b/docs/devices/manson/manson.md
@@ -0,0 +1,28 @@
+# Manson Laboratory Power Supply
+
+## Introduction
+The following models of Manson lab power supply are supported: "HCS-3102", "HCS-3014", "HCS-3204" and "HCS-3202".
+Once connected via USB, they are recognized as a virtual serial port and are supported in `flowchem` via the device type `MansonPowerSupply`.
+
+As for all `flowchem` devices, the virtual instrument can be instantiated via a configuration file that generates an openAPI endpoint.
+
+
+## Configuration
+Configuration sample showing all possible parameters:
+
+```toml
+[device.my-power-supply] # This is the device name
+type = "MansonPowerSupply"
+port = "COM12" # This will be /dev/tty* under linux/MacOS
+```
+
+```{note} Serial connection parameters
+Note, further parameters for the serial connections (i.e. those accepted by `serial.Serial`) such as `baudrate`, `parity`, `stopbits` and `bytesize` can be specified.
+```
+
+## API methods
+Once configured, a flowchem MansonPowerSupply object will expose the following commands:
+
+```{eval-rst}
+.. include:: api.rst
+```
diff --git a/docs/devices/mettler/flowir.md b/docs/devices/mettler/flowir.md
new file mode 100644
index 00000000..dd3e427d
--- /dev/null
+++ b/docs/devices/mettler/flowir.md
@@ -0,0 +1,3 @@
+# FlowIR
+
+A valid iCIR template name must be specified!
diff --git a/flowchem/core/server/__init__.py b/docs/devices/phidgets/p_sensor.md
similarity index 100%
rename from flowchem/core/server/__init__.py
rename to docs/devices/phidgets/p_sensor.md
diff --git a/docs/devices/supported_devices.md b/docs/devices/supported_devices.md
new file mode 100644
index 00000000..9195a66d
--- /dev/null
+++ b/docs/devices/supported_devices.md
@@ -0,0 +1,36 @@
+# Devices
+
+The following devices are currently supported in flowchem:
+
+| Manufacturer | Device / Model | `flowchem` name | `flowchem` components | Auto-discoverable |
+|------------------|--------------------|-------------------|-------------------------------------|:-----------------:|
+| DataApex | Clarity | Clarity | HPLCControl | NO |
+| Hamilton | ML600 | ML600 | SyringePump, DistributionValve | YES |
+| HarvardApparatus | Elite11 | Elite11 | SyringePump | YES |
+| Huber | various | HuberChiller | TemperatureControl | YES |
+| Knauer | Azura Compact | AzuraCompact | HPLCPump, PressureSensor | YES |
+| Knauer | V 2.1S | KnauerValve | InjectionValve or DistributionValve | YES |
+| Magritek | Spinsolve | Spinsolve | NMRControl | NO |
+| Manson | HCS-3102 family | MansonPowerSupply | PowerSupply | NO |
+| Mettler Toledo | iCIR | FlowIR | IRControl | NO |
+| Phidgets | VINT | PressureSensor | PressureSensor | NO |
+| Vici Valco | Universal Actuator | ViciValve | InjectionValve | NO |
+
+
+```{toctree}
+:maxdepth: 1
+:caption: Devices
+
+dataapex/clarity
+hamilton/ml600
+harvardapparatus/elite11
+huber/chiller
+knauer/azura_compact
+knauer/valves
+magritek/spinsolve
+manson/manson
+mettler/flowir
+phidgets/p_sensor
+vicivalco/vicivalve
+
+```
diff --git a/docs/devices/vicivalco/api.rst b/docs/devices/vicivalco/api.rst
new file mode 100644
index 00000000..384b3837
--- /dev/null
+++ b/docs/devices/vicivalco/api.rst
@@ -0,0 +1 @@
+.. openapi:: ./vici.yml
diff --git a/docs/devices/vicivalco/universal-actuator.pdf b/docs/devices/vicivalco/universal-actuator.pdf
new file mode 100644
index 00000000..e7b88b59
Binary files /dev/null and b/docs/devices/vicivalco/universal-actuator.pdf differ
diff --git a/docs/devices/vicivalco/vicivalve.md b/docs/devices/vicivalco/vicivalve.md
new file mode 100644
index 00000000..83b473fb
--- /dev/null
+++ b/docs/devices/vicivalco/vicivalve.md
@@ -0,0 +1,55 @@
+# Vici Valco Valves
+## Introduction
+While different valve heads can be mounted on the same Vici Universal actuator, so far only injection valves are
+supported, as they are the most common type.
+Support for additional valve types can be trivially added, based on the example of Knauer Valves.
+
+As for all `flowchem` devices, the virtual instrument can be instantiated via a configuration file that generates an
+openAPI endpoint.
+
+
+## Connection
+Depending on the device options, Vici valves can be controlled in different ways.
+The code here reported assumes serial communication, but can be easily ported to different connection type if necessary.
+
+## Configuration
+Configuration sample showing all possible parameters:
+
+```toml
+[device.my-vici-valve] # This is the valve identifier
+type = "ViciValve"
+port = "COM11" # This will be /dev/tty* under linux/MacOS
+address = 0 # Only needed for daisy-chaining. The address can be set on the pump, see manufacturer manual.
+```
+
+```{note} Serial connection parameters
+Note, further parameters for the serial connections (i.e. those accepted by `serial.Serial`) such as `baudrate`,
+`parity`, `stopbits` and `bytesize` can be specified.
+However, it should not be necessary as the default for the instrument are automatically used.
+```
+
+## API methods
+Once configured, a flowchem ViciValve object will expose the following commands:
+
+```{eval-rst}
+.. include:: api.rst
+```
+
+## Valve positions
+The valve position naming follows the general convention of flowchem (see [Base Valve](../../models/valves/base_valve.md)):
+* Injection valves have positions named 'LOAD' and 'INJECT'
+* Multiposition valves have positions from '1' to 'n' where n is the total number of ports available.
+
+## Device detection
+Vici valves can be auto-detected via the `flowchem-autodiscover` command-line utility.
+After having installed flowchem, run `flowchem-autodiscover` to create a configuration stub with all the devices that
+can be auto-detected on your PC.
+
+```{note} Valve types
+Note that the actual type of valve cannot be detected automatically, so you will need to replace the generic
+`KnauerValve` type in the configuration with one of the valid device types (i.e. one of `Knauer6Port2PositionValve`,
+`Knauer6Port6PositionValve`, `Knauer12PortValve` and `Knauer16PortValve`)
+```
+
+## Further information
+For further information please refer to the [manufacturer manual](./universal-actuator.pdf)
diff --git a/docs/getting_started.md b/docs/getting_started.md
new file mode 100644
index 00000000..eab97a3f
--- /dev/null
+++ b/docs/getting_started.md
@@ -0,0 +1,55 @@
+# Getting started
+
+Welcome to the new users guide to flowchem!
+
+Flowchem is a python library to control a variety of instruments commonly found in chemistry labs.
+
+## Installing flowchem
+While the RESTful API created by flowchem can be consumed from different programs and programming languages, flowchem itself is written in the popular open-source language Python.
+
+If you already have Python version 3.10 or above, you can install flowchem with pipx as follows:
+```shell
+pip install pipx
+pipx ensurepath
+pipx install flowchem
+```
+This is the recommended way because it will:
+* install flowchem in a virtualenv, without messing up your system interpreter
+* make the `flowchem` command available system-wide, by adding it to the system PATH
+
+Alternatively, you can install it *normally* via pip with:
+```shell
+pip install flowchem
+```
+
+If you don’t have Python yet, you can download it from [python.org](https://www.python.org/downloads/).
+
+To verify the installation has been completed successfully you can run `flowchem --version` command.
+
+## How to use flowchem
+Flowchem needs a device configuration file that specifies the connection settings for all the devices to be controlled.
+To save time, flowchem can autodetect many of the supported device types and generate a configuration stub.
+This is done by running the `flowchem-autodiscover` program from the command line.
+
+This is the result of running `flowchem-autodiscover` on a PC with FIXME WHAT connected to it.
+```shell
+```
+
+A new file named `blabla` has been created in the current working directory, with the following content:
+```toml
+```
+
+```{note}
+While technically `my-devices.toml` is written in the markup language [TOML format](https://en.wikipedia.org/wiki/TOML),
+the syntax of this language is intuitive and designed to be as human-editable as possible.
+If you follow this guide you will not need to learn anything about the TOML syntax but you can just copy and modify the
+example provided.
+```
+
+
+As you can see, a few placemarks in the autogenerated file have to be replaced by actual settings.
+For example, bla
+
+
+
+If you have comments or suggestions, please don’t hesitate to [reach out](./community.md)!
diff --git a/docs/images/flowchem_models.svg b/docs/images/flowchem_models.svg
new file mode 100644
index 00000000..de52be69
--- /dev/null
+++ b/docs/images/flowchem_models.svg
@@ -0,0 +1,4 @@
+
+
+
+
diff --git a/docs/index.md b/docs/index.md
index 14f2ae58..a04aca36 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,20 +1,15 @@
-.. Flowchem documentation master file, created by
- sphinx-quickstart on Thu Sep 30 12:19:43 2021.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
Welcome to Flowchem's documentation!
====================================
-.. toctree::
- :maxdepth: 2
- :caption: Contents:
+Select a topic from the list below, or read the [Getting Started](./getting_started.md) guide.
+
+```{toctree}
+:maxdepth: 2
+getting_started
+devices/supported_devices
-Indices and tables
-==================
+contributing/index.md
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
+```
diff --git a/docs/json2yml.py b/docs/json2yml.py
new file mode 100644
index 00000000..7993ce22
--- /dev/null
+++ b/docs/json2yml.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+import json
+import sys
+
+import yaml
+
+print(
+ yaml.dump(json.load(open(sys.argv[1])), default_flow_style=False, sort_keys=False)
+)
diff --git a/examples/Hamilton_ML600.py b/examples/Hamilton_ML600.py
deleted file mode 100644
index e368bc3d..00000000
--- a/examples/Hamilton_ML600.py
+++ /dev/null
@@ -1,78 +0,0 @@
-""" Example file for controlling Hamilton ML600 pumps with fllowchem """
-import asyncio
-from flowchem import ML600
-
-conf_pump1 = {
- "port": "COM12",
- "address": 1,
- "name": "water",
- "syringe_volume": 5,
-}
-
-conf_pump2 = {
- "port": "COM12",
- "address": 2,
- "name": "acetone",
- "syringe_volume": 5,
-}
-
-
-async def example(p1: ML600, p2: ML600):
- """Example code for Hamilton ML600 pumps"""
- # Initialize pumps.
- await p1.initialize_pump()
- await p2.initialize_pump()
-
- # We can also run commands on different pumps concurrently
- await asyncio.gather(p1.initialize_pump(), p2.initialize_pump())
-
- # Let's set the valve position to inlet
- await p1.set_valve_position(ML600.ValvePositionName.INPUT)
- await p2.set_valve_position(ML600.ValvePositionName.INPUT)
-
- # Let's change valve positions a couple of time
- print(f"Pump 1 valve position is now {await p1.get_valve_position()}")
- await p1.set_valve_position(ML600.ValvePositionName.OUTPUT)
- print(f"Pump 1 valve position is now {await p1.get_valve_position()}")
- await p1.set_valve_position(ML600.ValvePositionName.INPUT)
-
- # Valve position commands are special because, as default, they return only at the end of the movement.
- # You can avoid this by passing wait_for_movement_end=False.
- # The reason for this behaviour is that, while it is intuitive the need to wait for a syringe movement,
- # awaiting for the end of a brief valve movement is often forgotten.
- await p1.set_valve_position(
- ML600.ValvePositionName.OUTPUT, wait_for_movement_end=False
- )
- print(f"Pump 1 valve position is now {await p1.get_valve_position()}")
- await p1.set_valve_position(ML600.ValvePositionName.INPUT)
-
- # Note that all the speed parameters are intended in seconds for full stroke, i.e. seconds for syringe_volume
- await p1.to_volume(target_volume=0, speed=10)
- # We suggest to call the class methods with the full keywords and not positionally.
- # For example this line is a lot less readable:
- await p2.to_volume(0, 10)
-
- # Then we can rapidly fill our syringes
- await asyncio.gather(
- p1.to_volume(p1.syringe_volume, speed=10),
- p2.to_volume(p2.syringe_volume, speed=10),
- )
- # And let's wait for the movement to be over
- await asyncio.gather(p1.wait_until_idle(), p2.wait_until_idle())
-
- # And pump in the outlet port
- await p1.set_valve_position(ML600.ValvePositionName.OUTPUT)
- await p2.set_valve_position(ML600.ValvePositionName.OUTPUT)
-
- # If you find the stroke per second not convienent, the utility function ML600.flowrate_to_seconds_per_stroke
- # can be used to translate flow rate in seconds per stroke.
- speed1 = p1.flowrate_to_seconds_per_stroke(flowrate_in_ml_min=0.5)
- speed2 = p1.flowrate_to_seconds_per_stroke(flowrate_in_ml_min=0.75)
- await p1.to_volume(target_volume=0, speed=speed1)
- await p2.to_volume(target_volume=0, speed=speed2)
-
-
-pump1 = ML600.from_config(conf_pump1)
-pump2 = ML600.from_config(conf_pump2)
-
-asyncio.run(example(pump1, pump2))
diff --git a/examples/Huber_chiller.py b/examples/Huber_chiller.py
deleted file mode 100644
index bebd4fdd..00000000
--- a/examples/Huber_chiller.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import asyncio
-import time
-
-import aioserial
-from flowchem import HuberChiller
-
-chiller = HuberChiller(aioserial.AioSerial(url="COM1"))
-
-
-async def main():
- # Set target temperature
- await chiller.set_temperature_setpoint("35 °C")
- # Start temperature control
- await chiller.start_temperature_control()
- # Start recirculation
- await chiller.start_circulation()
-
- for _ in range(6):
- int_temp = await chiller.internal_temperature()
- process_temp = await chiller.process_temperature()
- ret_temp = await chiller.return_temperature()
- water_in_temp = await chiller.cooling_water_temp()
- water_out_temp = await chiller.cooling_water_temp_outflow()
-
- print(
- "Current temperatures are:\n"
- f"\tInternal = {int_temp}\n"
- f"\tProcess = {process_temp}\n"
- f"\tReturn = {ret_temp}\n"
- f"\tWater Inlet = {water_in_temp}\n"
- f"\tWater Outlet = {water_out_temp}\n"
- )
-
- time.sleep(10)
-
- # Stop temperature control
- await chiller.stop_temperature_control()
-
- time.sleep(10)
-
- # Stop circulation
- await chiller.stop_circulation()
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/examples/autonomous_reaction_optimization/README.md b/examples/autonomous_reaction_optimization/README.md
new file mode 100644
index 00000000..c5727d16
--- /dev/null
+++ b/examples/autonomous_reaction_optimization/README.md
@@ -0,0 +1 @@
+# Autonomous reaction optimization
diff --git a/examples/autonomous_reaction_optimization/_hw_control.py b/examples/autonomous_reaction_optimization/_hw_control.py
new file mode 100644
index 00000000..58216a18
--- /dev/null
+++ b/examples/autonomous_reaction_optimization/_hw_control.py
@@ -0,0 +1,36 @@
+import contextlib
+
+import requests
+from loguru import logger
+
+HOST = "127.0.0.1"
+PORT = 8000
+api_base = f"http://{HOST}:{PORT}"
+socl2_endpoint = f"{api_base}/socl2"
+hexyldecanoic_endpoint = f"{api_base}/hexyldecanoic"
+r4_channel = 0
+r4_endpoint = f"{api_base}/r4-heater/{r4_channel}"
+flowir_endpoint = f"{api_base}/flowir"
+
+__all__ = [
+ "socl2_endpoint",
+ "hexyldecanoic_endpoint",
+ "r4_endpoint",
+ "command_session",
+ "flowir_endpoint",
+]
+
+
+def check_for_errors(resp, *args, **kwargs):
+ resp.raise_for_status()
+
+
+def log_responses(resp, *args, **kwargs):
+ logger.debug(f"Reply: {resp.text} on {resp.url}")
+
+
+@contextlib.contextmanager
+def command_session():
+ with requests.Session() as session:
+ session.hooks["response"] = [log_responses, check_for_errors]
+ yield session
diff --git a/examples/autonomous_reaction_optimization/devices.toml b/examples/autonomous_reaction_optimization/devices.toml
new file mode 100644
index 00000000..9711108e
--- /dev/null
+++ b/examples/autonomous_reaction_optimization/devices.toml
@@ -0,0 +1,20 @@
+[device.socl2]
+type = "Elite11"
+port = "COM4"
+syringe_diameter = "14.567 mm"
+syringe_volume = "10 ml"
+baudrate = 115200
+
+[device.hexyldecanoic]
+type = "AzuraCompact"
+ip_address = "192.168.1.119"
+max_pressure = "10 bar"
+
+#[device.r4-heater]
+#type = "R4Heater"
+#port = "COM1"
+
+[device.flowir]
+type = "IcIR"
+url = "opc.tcp://localhost:62552/iCOpcUaServer"
+template = "30sec_2days.iCIRTemplate"
diff --git a/examples/autonomous_reaction_optimization/limits.in b/examples/autonomous_reaction_optimization/limits.in
new file mode 100644
index 00000000..d016cc9d
--- /dev/null
+++ b/examples/autonomous_reaction_optimization/limits.in
@@ -0,0 +1,3 @@
+#Peak Start Stop
+sm 1690 1755
+product 1755 1830
diff --git a/examples/autonomous_reaction_optimization/main_loop.py b/examples/autonomous_reaction_optimization/main_loop.py
new file mode 100644
index 00000000..43f6ca3f
--- /dev/null
+++ b/examples/autonomous_reaction_optimization/main_loop.py
@@ -0,0 +1,74 @@
+import time
+
+from _hw_control import *
+from gryffin import Gryffin
+from loguru import logger
+from run_experiment import run_experiment
+
+from examples.autonomous_reaction_optimization._hw_control import command_session
+
+logger.add("./xp.log", level="INFO")
+
+# load config
+config = {
+ "parameters": [
+ {"name": "SOCl2_equivalent", "type": "continuous", "low": 1.0, "high": 1.5},
+ {"name": "temperature", "type": "continuous", "low": 30, "high": 65},
+ {"name": "residence_time", "type": "continuous", "low": 2, "high": 20},
+ ],
+ "objectives": [
+ {"name": "product_ratio_IR", "goal": "max"},
+ ],
+}
+
+# Initialize gryffin
+gryffin = Gryffin(config_dict=config)
+observations = []
+
+# Initialize hardware
+with command_session() as sess:
+ # Heater to r.t.
+ sess.put(r4_endpoint + "/temperature", params={"temperature": "21"})
+ sess.put(r4_endpoint + "/power-on")
+
+ # Start pumps with low flow rate
+ sess.put(socl2_endpoint + "/flow-rate", params={"rate": "5 ul/min"})
+ sess.put(socl2_endpoint + "/infuse")
+
+ sess.put(hexyldecanoic_endpoint + "/flow-rate", params={"rate": "50 ul/min"})
+ sess.put(hexyldecanoic_endpoint + "/infuse")
+
+ # Ensure iCIR is running
+ assert (
+ sess.get(flowir_endpoint + "/is-connected").text == "true"
+ ), "iCIR app must be open on the control PC"
+ # If IR is running I just reuse previous experiment. Because cleaning the probe for the BG is slow
+ status = sess.get(flowir_endpoint + "/probe-status")
+ if status == " Not running":
+ # Start acquisition
+ xp = {
+ "template": "30sec_2days.iCIRTemplate",
+ "name": "hexyldecanoic acid chlorination - automated",
+ }
+ sess.put(flowir_endpoint + "/experiment/start", params=xp)
+
+
+# Run optimization for MAX_TIME
+MAX_TIME = 8 * 60 * 60
+start_time = time.monotonic()
+
+while time.monotonic() < (start_time + MAX_TIME):
+ # query gryffin for new conditions_to_test, 1 exploration 1 exploitation (i.e. lambda 1 and -1)
+ conditions_to_test = gryffin.recommend(
+ observations=observations, num_batches=1, sampling_strategies=[-1, 1]
+ )
+
+ # evaluate the proposed parameters!
+ for conditions in conditions_to_test:
+ # Get this from your experiment!
+ conditions["product_ratio_IR"] = run_experiment(**conditions)
+
+ logger.info(f"Experiment ended: {conditions}")
+
+ observations.extend(conditions_to_test)
+ logger.info(observations)
diff --git a/examples/autonomous_reaction_optimization/plot/plot.py b/examples/autonomous_reaction_optimization/plot/plot.py
new file mode 100644
index 00000000..a9d94ebe
--- /dev/null
+++ b/examples/autonomous_reaction_optimization/plot/plot.py
@@ -0,0 +1,34 @@
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+
+# Time 2.5, 5.0, 7.5, 10, 12.5, 15
+time = np.linspace(25, 150, 6)
+time = time / 10
+
+# Temp 50, 60, 70, 80, 90
+temp = np.linspace(50, 90, 5)
+
+# Data
+df = pd.DataFrame.from_dict(
+ {
+ "2.5": [0, 1, 2, 4, 8],
+ "5": [2, 4, 8, 16, 32],
+ "7.5": [4, 1, 2, 4, 8],
+ "10": [6, 1, 2, 4, 8],
+ "12.5": [8, 20, 50, 70, 90],
+ "15": [20, 50, 70, 90, 100],
+ }
+)
+df.index.name = "time"
+df.columns.name = "temp"
+
+with plt.xkcd():
+ fig, ax = plt.subplots()
+ plt.pcolormesh(time, temp, np.array(df))
+ ax.set_title("Fake data :D")
+ ax.set_xlabel("Time (min)")
+ ax.set_ylabel("Temp (C)")
+ ax.set_xticks([2.5, 5, 7.5, 10, 12.5, 15])
+ fig.tight_layout()
+ plt.show()
diff --git a/examples/autonomous_reaction_optimization/run_experiment.py b/examples/autonomous_reaction_optimization/run_experiment.py
new file mode 100644
index 00000000..49277af8
--- /dev/null
+++ b/examples/autonomous_reaction_optimization/run_experiment.py
@@ -0,0 +1,171 @@
+import time
+
+import numpy as np
+import pandas as pd
+from _hw_control import *
+from loguru import logger
+from scipy import integrate
+
+
+def calculate_flow_rates(SOCl2_equivalent: float, residence_time: float):
+ """
+ Calculate pump flow rate based on target residence time and SOCl2 equivalents
+
+ Stream A: hexyldecanoic acid ----|
+ |----- REACTOR ---- IR ---- waste
+ Stream B: thionyl chloride ----|
+
+ Args:
+ SOCl2_equivalent:
+ residence_time:
+
+ Returns: dict with pump names and flow rate in ml/min
+
+ """
+ REACTOR_VOLUME = 10 # ml
+ HEXYLDECANOIC_ACID = 1.374 # Molar
+ SOCl2 = 13.768 # Molar
+
+ total_flow_rate = REACTOR_VOLUME / residence_time # ml/min
+
+ # Solving a system of 2 equations and 2 unknowns...
+ return {
+ "hexyldecanoic": (
+ a := (total_flow_rate * SOCl2)
+ / (HEXYLDECANOIC_ACID * SOCl2_equivalent + SOCl2)
+ ),
+ "socl2": total_flow_rate - a,
+ }
+
+
+def set_parameters(rates: dict, temperature: float):
+ with command_session() as sess:
+ sess.put(
+ socl2_endpoint + "/flow-rate", params={"rate": f"{rates['socl2']} ml/min"}
+ )
+ sess.put(
+ hexyldecanoic_endpoint + "/flow-rate",
+ params={"rate": f"{rates['hexyldecanoic']} ml/min"},
+ )
+
+ # Sets heater
+ heater_data = {"temperature": f"{temperature:.2f} °C"}
+ sess.put(r4_endpoint + "/temperature", params=heater_data)
+
+
+def wait_stable_temperature():
+ """Wait until the ste temperature has been reached."""
+ logger.info("Waiting for the reactor temperature to stabilize")
+ while True:
+ with command_session() as sess:
+ r = sess.get(r4_endpoint + "/target-reached")
+ if r.text == "true":
+ logger.info("Stable temperature reached!")
+ break
+ else:
+ time.sleep(5)
+
+
+def get_ir_once_stable():
+ """Keeps acquiring IR spectra until changes are small, then returns the spectrum."""
+ logger.info("Waiting for the IR spectrum to be stable")
+ with command_session() as sess:
+ # Wait for first spectrum to be available
+ while int(sess.get(flowir_endpoint + "/sample-count").text) == 0:
+ time.sleep(1)
+ # Get spectrum
+ previous_spectrum = pd.read_json(
+ sess.get(flowir_endpoint + "/sample/spectrum-treated").text
+ )
+ previous_spectrum = previous_spectrum.set_index("wavenumber")
+ # In case the id has changed between requests (highly unlikely)
+ last_sample_id = int(sess.get(flowir_endpoint + "/sample-count").text)
+
+ while True:
+ # Wait for a new spectrum
+ while True:
+ with command_session() as sess:
+ current_sample_id = int(
+ sess.get(flowir_endpoint + "/sample-count").text
+ )
+ if current_sample_id > last_sample_id:
+ break
+ else:
+ time.sleep(2)
+
+ with command_session() as sess:
+ current_spectrum = pd.read_json(
+ sess.get(flowir_endpoint + "/sample/spectrum-treated").text
+ )
+ current_spectrum = current_spectrum.set_index("wavenumber")
+
+ previous_peaks = integrate_peaks(previous_spectrum)
+ current_peaks = integrate_peaks(current_spectrum)
+
+ delta_product_ratio = abs(current_peaks["product"] - previous_peaks["product"])
+ logger.info(f"Current product ratio is {current_peaks['product']}")
+ logger.debug(f"Delta product ratio is {delta_product_ratio}")
+
+ if delta_product_ratio < 0.002: # 0.2% error on ratio
+ logger.info("IR spectrum stable!")
+ return current_peaks
+
+ previous_spectrum = current_spectrum
+ last_sample_id = current_sample_id
+
+
+def integrate_peaks(ir_spectrum):
+ """Integrate areas from `limits.in` in the spectrum provided."""
+ # List of peaks to be integrated
+ peak_list = np.recfromtxt("limits.in", encoding="UTF-8")
+
+ peaks = {}
+ for name, start, end in peak_list:
+        # This is a common mistake since wavenumbers are plotted in reverse order
+ if start > end:
+ start, end = end, start
+
+ df_view = ir_spectrum.loc[
+ (start <= ir_spectrum.index) & (ir_spectrum.index <= end)
+ ]
+ peaks[name] = integrate.trapezoid(df_view["intensity"])
+ logger.debug(f"Integral of {name} between {start} and {end} is {peaks[name]}")
+
+ # Normalize integrals
+
+ return {k: v / sum(peaks.values()) for k, v in peaks.items()}
+
+
+def run_experiment(
+ SOCl2_equivalent: float, temperature: float, residence_time: float
+) -> float:
+ """
+ Runs one experiment with the provided conditions
+
+ Args:
+ SOCl2_equivalent: SOCl2 to substrate ratio
+ temperature: in Celsius
+ residence_time: in minutes
+
+ Returns: IR product area / (SM + product areas)
+
+ """
+ logger.info(
+ f"Starting experiment with {SOCl2_equivalent:.2f} eq SOCl2, {temperature:.1f} degC and {residence_time:.2f} min"
+ )
+ # Set stand-by flow-rate first
+ set_parameters({"hexyldecanoic": "0.1 ml/min", "socl2": "10 ul/min"}, temperature)
+ wait_stable_temperature()
+ # Set actual flow rate once the set temperature has been reached
+ pump_flow_rates = calculate_flow_rates(SOCl2_equivalent, residence_time)
+ set_parameters(pump_flow_rates, temperature)
+ # Wait 1 residence time
+ time.sleep(residence_time * 60)
+ # Start monitoring IR
+ peaks = get_ir_once_stable()
+
+ return peaks["product"]
+
+
+if __name__ == "__main__":
+ print(get_ir_once_stable())
diff --git a/examples/k16valve.py b/examples/k16valve.py
deleted file mode 100644
index af800ee1..00000000
--- a/examples/k16valve.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import asyncio
-
-from flowchem import Knauer16PortValve
-
-DELAY = 60 * 5 # in sec
-START_POSITION = 1 # First position for collection
-
-
-async def main():
- valve = Knauer16PortValve(ip_address="192.168.1.122")
- await valve.initialize()
-
- position = START_POSITION
-
- while True:
- await valve.switch_to_position(str(position))
- await asyncio.sleep(DELAY)
- position += 1
- if position > 16:
- position = 1
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/examples/limits.in b/examples/limits.in
deleted file mode 100644
index 667ce938..00000000
--- a/examples/limits.in
+++ /dev/null
@@ -1,6 +0,0 @@
-#Peak Start Stop
-SM -59.43 -59.72
-Unk-1 -56.15 -56.50
-product -56.60 -57.05
-Unk-6 -59.11 -59.38
-
diff --git a/examples/nmr control + autointegration.py b/examples/nmr control + autointegration.py
deleted file mode 100644
index 8ea66a61..00000000
--- a/examples/nmr control + autointegration.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import asyncio
-import glob
-import os
-from pathlib import Path
-import numpy as np
-import pandas as pd
-import matplotlib.pyplot as plt
-import itertools as it
-from flowchem.components.devices.Magritek import Spinsolve, NMRSpectrum
-from flowchem import Knauer16PortValve
-
-
-Collector_DELAY = 60 * 30 # in sec
-START_POSITION = 7 # First position for collection
-
-
-async def Collector():
- valve = Knauer16PortValve(ip_address="192.168.1.122")
- await valve.initialize()
-
- position = START_POSITION
-
- while True:
- await valve.switch_to_position(str(position))
- await asyncio.sleep(Collector_DELAY)
- position += 1
- if position > 16:
- position = 1
-
-
-NMR_DELAY = 60 * 2 # in sec
-counter = it.count()
-
-# read in the integration limits
-peak_list = np.recfromtxt("limits.in", encoding="UTF-8")
-NMR_forlder_month = r"C:\Projects\Data\2022\03"
-
-
-async def Analysis(observed_result):
- nmr = Spinsolve(host="BSMC-YMEF002121")
-
- while True:
- path = await nmr.run_protocol(
- "1D FLUORINE+",
- {
- "Number": 128,
- "AcquisitionTime": 3.2,
- "RepetitionTime": 2,
- "PulseAngle": 90,
- },
- )
- observed_time = (NMR_DELAY / 60 + 4) * next(counter)
- if str(path) == ".":
- # continue
- dir_list_day = os.listdir(NMR_forlder_month)
- dir_list_time = os.listdir(NMR_forlder_month / Path(dir_list_day[-1]))
- path = NMR_forlder_month / Path(dir_list_day[-1]) / Path(dir_list_time[-1])
- print(path)
-
- # else:
- peak_normalized_list = peak_aquire_process(path)
- observed_result = observed_result.append(
- pd.DataFrame(
- peak_normalized_list,
- index=["SM", "product", "side-P"],
- columns=[observed_time],
- ).T
- )
-
- # result
- print(observed_result)
- # save
- observed_result.to_csv(
- r"W:\BS-FlowChemistry\People\Wei-Hsin\Spinsolve\export_dataframe_0317_03.csv",
- header=True,
- )
- plt.figure()
- observed_result.plot()
- plt.legend(loc="best")
- plt.savefig(
- r"W:\BS-FlowChemistry\People\Wei-Hsin\Spinsolve\export_plot_0317_03.png"
- )
-
- await asyncio.sleep(NMR_DELAY)
-
-
-def peak_aquire_process(path):
- spectrum = NMRSpectrum(path)
- spectrum.process()
-
- peak_sum_list = []
-
- # loop over the integration limits
- for name, start, end in peak_list:
- min = spectrum.uc(start, "ppm")
- max = spectrum.uc(end, "ppm")
- if min > max:
- min, max = max, min
- # extract the peak
- peak = spectrum.processed_data[min : max + 1]
- peak_sum_list.append(peak.sum())
-
- # peak normalization
- y = sum(peak_sum_list)
- peak_normalized_list = [i / y for i in peak_sum_list]
- return peak_normalized_list
-
-
-async def main():
- observed_time = 0
- observed_result = pd.DataFrame(
- [1, 0, 0], index=["SM", "product", "side-P"], columns=[observed_time]
- ).T
- await asyncio.wait([Collector(), Analysis(observed_result)])
- # await asyncio.gather([Collector(),Analysis(observed_result)])
-
- # await Analysis(observed_result)
- # await Collector()
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/examples/nmr/devices.toml b/examples/nmr/devices.toml
new file mode 100644
index 00000000..30816fe4
--- /dev/null
+++ b/examples/nmr/devices.toml
@@ -0,0 +1,15 @@
+
+
+
+[device.pump-b90e33]
+type = "AzuraCompact"
+ip_address = "192.168.1.119" # MAC address during discovery: 00:80:a3:b9:0e:33
+# max_pressure = "XX bar"
+# min_pressure = "XX bar"
+#
+#[device.my-benchtop-nmr] # This is the valve identifier
+#type = "Spinsolve"
+#host = "127.0.0.1" # IP address of the PC running Spinsolve, 127.0.0.1 for local machine. Only necessary parameter.
+#port = 13000
+#sample_name = "automated-experiment"
+#solvent = "chloroform-d"
diff --git a/examples/phidget.py b/examples/phidget.py
deleted file mode 100644
index adbe8523..00000000
--- a/examples/phidget.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import time
-import matplotlib.pyplot as plt
-from flowchem import PressureSensor
-
-p_sens = PressureSensor(
- pressure_range=("0 bar", "25 bar"), vint_serial_number=627768, vint_channel=0
-)
-
-start_time = time.time()
-x = []
-y = []
-while True:
- x.append(time.time() - start_time)
- y.append(p_sens.read_pressure())
- plt.scatter(x, y)
- plt.show()
diff --git a/flowchem/__init__.py b/flowchem/__init__.py
deleted file mode 100644
index b130447c..00000000
--- a/flowchem/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# isort: skip_file
-from .components import *
-from .core import *
-from .assemblies import *
-
-__version__ = "0.0.5"
diff --git a/flowchem/assemblies/LTF_reactors/LTF_HTM_ST_3_1.pdf b/flowchem/assemblies/LTF_reactors/LTF_HTM_ST_3_1.pdf
deleted file mode 100644
index 13cca67b..00000000
Binary files a/flowchem/assemblies/LTF_reactors/LTF_HTM_ST_3_1.pdf and /dev/null differ
diff --git a/flowchem/assemblies/LTF_reactors/LTF_reactors.py b/flowchem/assemblies/LTF_reactors/LTF_reactors.py
deleted file mode 100644
index e256b2e3..00000000
--- a/flowchem/assemblies/LTF_reactors/LTF_reactors.py
+++ /dev/null
@@ -1,57 +0,0 @@
-""" LTF reactors """
-from typing import Optional
-
-from flowchem.assemblies import Assembly
-from flowchem.components.stdlib import Channel, YMixer
-
-
-class LTF_HTM_ST_3_1(Assembly):
- """An LTF HTM ST 3 1 reactor."""
-
- def _validate(self, dry_run):
- return True
-
- def __init__(self, name: Optional[str] = None):
- super().__init__(name=name)
- self.port = {"INLET_1", "INLET_2", "QUENCHER", "OUTLET"}
-
- inlet1 = Channel(
- name="INLET_1", length="10 mm", volume="8 ul", material="glass"
- )
- inlet2 = Channel(
- name="INLET_2", length="10 mm", volume="8 ul", material="glass"
- )
- mixer_inlet = YMixer()
- reactor1 = Channel(
- name="REACTOR", length="60 mm", volume="58 ul", material="glass"
- )
- quencher = Channel(
- name="QUENCHER", length="15 mm", volume="10 ul", material="glass"
- )
- mixer_quencher = YMixer()
- reactor2 = Channel(
- name="REACTOR2", length="40 mm", volume="46 ul", material="glass"
- )
- outlet = Channel(
- name="OUTLET", length="10 mm", volume="28 ul", material="glass"
- )
-
- self.nodes = [
- inlet1,
- inlet2,
- mixer_inlet,
- reactor1,
- quencher,
- mixer_quencher,
- reactor2,
- outlet,
- ]
- self.edges = [
- (inlet1, mixer_inlet),
- (inlet2, mixer_inlet),
- (mixer_inlet, reactor1),
- (reactor1, mixer_quencher),
- (quencher, mixer_quencher),
- (mixer_quencher, reactor2),
- (reactor2, outlet),
- ]
diff --git a/flowchem/assemblies/LTF_reactors/__init__.py b/flowchem/assemblies/LTF_reactors/__init__.py
deleted file mode 100644
index dd78ba48..00000000
--- a/flowchem/assemblies/LTF_reactors/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .LTF_reactors import LTF_HTM_ST_3_1
diff --git a/flowchem/assemblies/README.md b/flowchem/assemblies/README.md
deleted file mode 100644
index 3b280bc1..00000000
--- a/flowchem/assemblies/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# flowchem/assemblies
-
-Pre-defined reactor assembly, i.e. sub-graphs representing hardware that is logically composed by several non-separable components e.g. in a chip reactors.
-
-* LTF_reactors
diff --git a/flowchem/assemblies/__init__.py b/flowchem/assemblies/__init__.py
deleted file mode 100644
index f8e98cc3..00000000
--- a/flowchem/assemblies/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .assembly import Assembly
-from .LTF_reactors import LTF_HTM_ST_3_1
-
-__all__ = ["Assembly", "LTF_HTM_ST_3_1"]
diff --git a/flowchem/assemblies/assembly.py b/flowchem/assemblies/assembly.py
deleted file mode 100644
index ed57db93..00000000
--- a/flowchem/assemblies/assembly.py
+++ /dev/null
@@ -1,85 +0,0 @@
-from typing import TYPE_CHECKING, Sequence, Tuple
-
-from flowchem.components.properties import Component, MultiportComponentMixin
-
-if TYPE_CHECKING:
- from flowchem.core.graph import DeviceGraph
-
-
-class Assembly(MultiportComponentMixin, Component):
- """A class representing a collection of components."""
-
- nodes: Sequence[Component]
- edges: Sequence[Tuple[Component, Component]]
-
- def _subcomponent_by_name(self, name: str) -> Component:
- """Returns a component in nodes by its name."""
- for node in self.nodes:
- if node.name == name:
- return node
- raise ValueError(f"No component named {name} in {self}")
-
- def explode(self, graph: "DeviceGraph"):
- """
- Explode the assembly into its components in the provided graph.
- The graph must already include the assembly as a node with all the connections defined.
- """
-
- assert self in graph.graph.nodes, "Assembly must be in the graph to explode it."
-
- # Convert edges to self into edges to self's components.
- for from_component, to_component, attributes in graph.graph.in_edges(
- self, data=True
- ):
- # If port attribute is unspecified, the connection is assumed to all the assembly's subcomponents.
- # This should only happen for logical connections (e.g. temp control).
- if attributes["to_port"] is None:
- for subcomponent in self.nodes:
- graph.graph.add_edge(from_component, subcomponent)
- continue
-
- # New destination is the component with name matching the edge port on the assembly
- new_to_component = self._subcomponent_by_name(attributes["to_port"])
-
- # Update edge - just add a new one, the old one will be implicitly removed with graph.remove_node(self)
- graph.add_connection(
- origin=from_component,
- destination=new_to_component,
- origin_port=attributes.get("from_port", None),
- )
-
- for from_component, to_component, attributes in graph.graph.out_edges(
- self, data=True
- ):
- assert (
- from_component is self
- ), "Getting the edges pointing from the assembly."
-
- # New origin is the component with name matching the edge port on the assembly
- new_from_component = self._subcomponent_by_name(attributes["from_port"])
-
- # Update edge - just add a new one, the old one will be implicitly removed with graph.remove_node(self)
- graph.add_connection(
- origin=new_from_component,
- destination=to_component,
- destination_port=attributes.get("to_port", None),
- )
-
- # Updates component names. Ensures unique names in the graph. (Note: do not update those earlier: see above!)
- for component in self.nodes:
- component.name = f"{self.name}_{component.name}"
-
- # Remove assembly from graph (this also removes all edges)
- graph.graph.remove_node(self)
-
- # Add nodes to graph
- graph.add_device(self.nodes)
- # Add edges to graph
- for edge in self.edges:
- graph.add_connection(edge[0], edge[1])
-
- def _validate(self, dry_run):
- """Components are valid for dry runs, but not for real runs."""
- raise NotImplementedError(
- "Assembly object should be expanded into their components before run."
- )
diff --git a/flowchem/cli.py b/flowchem/cli.py
deleted file mode 100644
index 0f3953f0..00000000
--- a/flowchem/cli.py
+++ /dev/null
@@ -1,6 +0,0 @@
-""" Shell script executor """
-
-
-def main():
- """Main function"""
- print("Here I should parse configuration and start server")
diff --git a/flowchem/components/README.md b/flowchem/components/README.md
deleted file mode 100644
index 0b9093a0..00000000
--- a/flowchem/components/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# flowchem/components/devices
-
-This folder contains all the components that appear in a device graph.
-
-For real device graph, only components from **assemblies**, **stdlib** and **devices** should be used.
-
-This folder includes:
-* Simple modular components such as mixers and tubing in [stdlib](stdlib/README.md)
-
-* actual hardware devices in [devices](devices/README.md)
-
-* Abstract base classes defining the properties that actual device component can implement in [properties](properties/README.md)
-
-* dummy object for testing purposes in [dummy](dummy/README.md)
-(assemblies/README.md)
diff --git a/flowchem/components/__init__.py b/flowchem/components/__init__.py
deleted file mode 100644
index 31029ff7..00000000
--- a/flowchem/components/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# isort: skip_file
-from . import properties, stdlib, dummy
-from .devices import *
diff --git a/flowchem/components/devices/Hamilton/ML600.py b/flowchem/components/devices/Hamilton/ML600.py
deleted file mode 100644
index c539effc..00000000
--- a/flowchem/components/devices/Hamilton/ML600.py
+++ /dev/null
@@ -1,810 +0,0 @@
-"""
-This module is used to control Hamilton ML600 syringe pump via the protocol1/RNO+.
-"""
-
-from __future__ import annotations
-
-import string
-import time
-import warnings
-from dataclasses import dataclass
-from enum import IntEnum
-from typing import TYPE_CHECKING, Optional, Set
-
-import aioserial
-from loguru import logger
-
-from flowchem.components.stdlib import Pump
-from flowchem.exceptions import DeviceError, InvalidConfiguration
-from flowchem.units import flowchem_ureg
-
-if TYPE_CHECKING:
- import pint
-
-
-@dataclass
-class Protocol1CommandTemplate:
- """Class representing a pump command and its expected reply, but without target pump number"""
-
- command: str
- optional_parameter: str = ""
- execute_command: bool = True
-
- def to_pump(
- self, address: int, command_value: str = "", argument_value: str = ""
- ) -> Protocol1Command:
- """Returns a Protocol11Command by adding to the template pump address and command arguments"""
- return Protocol1Command(
- target_pump_num=address,
- command=self.command,
- optional_parameter=self.optional_parameter,
- command_value=command_value,
- argument_value=argument_value,
- execute_command=self.execute_command,
- )
-
-
-@dataclass
-class Protocol1Command(Protocol1CommandTemplate):
- """Class representing a pump command and its expected reply"""
-
- PUMP_ADDRESS = dict(enumerate(string.ascii_lowercase[:16], start=1))
- # i.e. PUMP_ADDRESS = {1: 'a', 2: 'b', 3: 'c', 4: 'd', ..., 16: 'p'}
- # Note ':' is used for broadcast within the daisy chain.
-
- target_pump_num: int = 1
- command_value: Optional[str] = None
- argument_value: Optional[str] = None
-
- def compile(self) -> bytes:
- """Create actual command byte by prepending pump address to command and appending executing command."""
- assert self.target_pump_num in range(1, 17)
- if not self.command_value:
- self.command_value = ""
-
- compiled_command = (
- f"{self.PUMP_ADDRESS[self.target_pump_num]}"
- f"{self.command}{self.command_value}"
- )
-
- if self.argument_value:
- compiled_command += f"{self.optional_parameter}{self.argument_value}"
- # Add execution flag at the end
- if self.execute_command is True:
- compiled_command += "R"
-
- return (compiled_command + "\r").encode("ascii")
-
-
-class HamiltonPumpIO:
- """Setup with serial parameters, low level IO"""
-
- ACKNOWLEDGE = chr(6)
- NEGATIVE_ACKNOWLEDGE = chr(21)
- DEFAULT_CONFIG = {
- "timeout": 0.1,
- "baudrate": 9600,
- "parity": aioserial.PARITY_EVEN,
- "stopbits": aioserial.STOPBITS_ONE,
- "bytesize": aioserial.SEVENBITS,
- }
-
- def __init__(self, aio_port: aioserial.Serial):
- """
- Initialize communication on the serial port where the pumps are located and initialize them
- Args:
- aio_port: aioserial.Serial() object
- """
- self._serial = aio_port
-
- # These will be set by `HamiltonPumpIO.initialize()`
- self._initialized = False
- self.num_pump_connected: Optional[int] = None
-
- @classmethod
- def from_config(cls, config):
- """Create HamiltonPumpIO from config."""
- # Merge default settings, including serial, with provided ones.
- configuration = dict(HamiltonPumpIO.DEFAULT_CONFIG, **config)
-
- try:
- serial_object = aioserial.AioSerial(**configuration)
- except aioserial.SerialException as serial_exception:
- raise InvalidConfiguration(
- f"Cannot connect to the pump on the port <{configuration.get('port')}>"
- ) from serial_exception
-
- return cls(serial_object)
-
- async def initialize(self, hw_initialization: bool = True):
- """
- Ensure connection with pump + initialize
-
- Args:
- hw_initialization: Whether each pump has to be initialized. Note that this might be undesired!
- """
- # This has to be run after each power cycle to assign addresses to pumps
- self.num_pump_connected = await self._assign_pump_address()
- if hw_initialization:
- await self._hw_init()
- self._initialized = True
-
- async def _assign_pump_address(self) -> int:
- """
- To be run on init, auto assign addresses to pumps based on their position in the daisy chain.
- A custom command syntax with no addresses is used here so read and write has been rewritten
- """
- try:
- await self._write_async("1a\r".encode("ascii"))
- except aioserial.SerialException as e:
- raise InvalidConfiguration from e
-
- reply = await self._read_reply_async()
- if not reply or reply[:1] != "1":
-
- raise InvalidConfiguration(f"No pump found on {self._serial.port}")
- # reply[1:2] should be the address of the last pump. However, this does not work reliably.
- # So here we enumerate the pumps explicitly instead
- last_pump = 0
- for pump_num, address in Protocol1Command.PUMP_ADDRESS.items():
- await self._write_async(f"{address}UR\r".encode("ascii"))
- if "NV01" in await self._read_reply_async():
- last_pump = pump_num
- else:
- break
- logger.debug(f"Found {last_pump} pumps on {self._serial.port}!")
- return int(last_pump)
-
- async def _hw_init(self):
- """Send to all pumps the HW initialization command (i.e. homing)"""
- await self._write_async(b":XR\r") # Broadcast: initialize + execute
- # Note: no need to consume reply here because there is none (since we are using broadcast)
-
- async def _write_async(self, command: bytes):
- """Writes a command to the pump"""
- if not self._initialized:
- raise DeviceError(
- "Pump not initialized!\n"
- "Have you called `initialize()` after object creation?"
- )
- await self._serial.write_async(command)
- logger.debug(f"Command {repr(command)} sent!")
-
- async def _read_reply_async(self) -> str:
- """Reads the pump reply from serial communication"""
- reply_string = await self._serial.readline_async()
- logger.debug(f"Reply received: {reply_string}")
- return reply_string.decode("ascii")
-
- @staticmethod
- def parse_response(response: str) -> str:
- """Split a received line in its components: success, reply"""
- status = response[:1]
- assert status in (
- HamiltonPumpIO.ACKNOWLEDGE,
- HamiltonPumpIO.NEGATIVE_ACKNOWLEDGE,
- ), "Invalid status reply!"
-
- if status == HamiltonPumpIO.ACKNOWLEDGE:
- logger.debug("Positive acknowledge received")
- else:
- logger.warning("Negative acknowledge received")
- warnings.warn(
- "Negative acknowledge reply received from pump: check command validity!"
- )
-
- return response[1:].rstrip()
-
- def reset_buffer(self):
- """Reset input buffer before reading from serial. In theory not necessary if all replies are consumed..."""
- self._serial.reset_input_buffer()
-
- async def write_and_read_reply_async(self, command: Protocol1Command) -> str:
- """Main HamiltonPumpIO method.
- Sends a command to the pump, read the replies and returns it, optionally parsed"""
- self.reset_buffer()
- await self._write_async(command.compile())
- response = await self._read_reply_async()
-
- if not response:
- raise InvalidConfiguration(
- f"No response received from pump, check pump address! "
- f"(Currently set to {command.target_pump_num})"
- )
-
- return self.parse_response(response)
-
- @property
- def name(self) -> str:
- """This is used to provide a nice-looking default name to pumps based on their serial connection."""
- try:
- return self._serial.name
- except AttributeError:
- return ""
-
-
-class ML600(Pump):
- """ML600 implementation according to docs. Tested on 61501-01 (single syringe).
-
- From docs:
- To determine the volume dispensed per step the total syringe volume is divided by
- 48,000 steps. All Hamilton instrument syringes are designed with a 60 mm stroke
- length and the Microlab 600 is designed to move 60 mm in 48,000 steps. For
- example to dispense 9 mL from a 10 mL syringe you would determine the number of
- steps by multiplying 48000 steps (9 mL/10 mL) to get 43,200 steps.
- """
-
- # This class variable is used for daisy chains (i.e. multiple pumps on the same serial connection). Details below.
- _io_instances: Set[HamiltonPumpIO] = set()
- # The mutable object (a set) as class variable creates a shared state across all the instances.
- # When several pumps are daisy-chained on the same serial port, they need to all access the same Serial object,
- # because access to the serial port is exclusive by definition (also locking there ensure thread safe operations).
- # FYI it is a borg idiom https://www.oreilly.com/library/view/python-cookbook/0596001673/ch05s23.html
-
- class ValvePositionName(IntEnum):
- """Maps valve position to the corresponding number"""
-
- POSITION_1 = 1
- # POSITION_2 = 2
- POSITION_3 = 3
- INPUT = 9 # 9 is default inlet, i.e. 1
- OUTPUT = 10 # 10 is default outlet, i.e. 3
- WASH = 11 # 11 is default wash, i.e. undefined
-
- # Only Hamilton syringes are compatible w/ the ML600, and they come on a limited set of sizes. (Values in ml)
- VALID_SYRINGE_VOLUME = {
- 0.01,
- 0.025,
- 0.05,
- 0.1,
- 0.25,
- 0.5,
- 1.0,
- 2.5,
- 5.0,
- 10.0,
- 25.0,
- 50.0,
- }
-
- def __init__(
- self,
- pump_io: HamiltonPumpIO,
- syringe_volume: str,
- address: int = 1,
- name: Optional[str] = None,
- ):
- """
- Default constructor, needs an HamiltonPumpIO object. See from_config() class method for config-based init.
-
- Args:
- pump_io: An HamiltonPumpIO w/ serial connection to the daisy chain w/ target pump.
- syringe_volume: Volume of the syringe used, either a Quantity or number in ml.
- address: number of pump in array, 1 for first one, auto-assigned on init based on position.
- name: 'cause naming stuff is important.
- """
- super().__init__(name)
- # HamiltonPumpIO
- self.pump_io = pump_io
- ML600._io_instances.add(self.pump_io) # See above for details.
-
- # Pump address is the pump sequence number if in chain. Count starts at 1, default.
- self.address = int(address)
-
- # The pump name is used for logs and error messages.
- self.name = f"Pump {self.pump_io.name}:{address}" if name is None else name
-
- # Syringe pumps only perform linear movement, and the volume displaced is function of the syringe loaded.
- try:
- self.syringe_volume = flowchem_ureg(syringe_volume)
- except AttributeError as attribute_error:
- raise InvalidConfiguration(
- f"{self.__class__.__name__}:{self.name} "
- f"Syringe volume must be a string parsable as pint.Quantity!\n"
- f"It is now a {type(syringe_volume)}: {syringe_volume} "
- ) from attribute_error
-
- if self.syringe_volume.m_as("ml") not in ML600.VALID_SYRINGE_VOLUME:
- raise InvalidConfiguration(
- f"The specified syringe volume ({syringe_volume}) does not seem to be valid!\n"
- f"The volume in ml has to be one of {ML600.VALID_SYRINGE_VOLUME}"
- )
-
- self._steps_per_ml = flowchem_ureg.Quantity(
- f"{48000 / self.syringe_volume} step/ml"
- )
- self._offset_steps = 100 # Steps added to each absolute move command, to decrease wear and tear at volume = 0
- self._max_vol = (
- (48000 - self._offset_steps) * flowchem_ureg.step / self._steps_per_ml
- )
-
- @classmethod
- def from_config(cls, **config):
- """This class method is used to create instances via config file by the server for HTTP interface."""
- # Many pump can be present on the same serial port with different addresses.
- # This shared list of HamiltonPumpIO objects allow shared state in a borg-inspired way, avoiding singletons
- # This is only relevant to programmatic instantiation, i.e. when from_config() is called per each pump from a
- # config file, as it is the case in the HTTP server.
- # HamiltonPump_IO() manually instantiated are not accounted for.
- pumpio = None
- for obj in ML600._io_instances:
- # noinspection PyProtectedMember
- if obj._serial.port == config.get("port"):
- pumpio = obj
- break
-
- # If not existing serial object are available for the port provided, create a new one
- if pumpio is None:
- # Remove ML600-specific keys to only have HamiltonPumpIO's kwargs
- config_for_pumpio = {
- k: v
- for k, v in config.items()
- if k not in ("syringe_volume", "address", "name")
- }
- pumpio = HamiltonPumpIO.from_config(config_for_pumpio)
-
- return cls(
- pumpio,
- syringe_volume=config.get("syringe_volume"),
- address=config.get("address"),
- name=config.get("name"),
- )
-
- async def initialize(self, hw_init=False, init_speed: str = "200 sec / stroke"):
- """Must be called after init before anything else."""
- # Test connectivity by querying the pump's firmware version
- fw_cmd = Protocol1CommandTemplate(command="U").to_pump(self.address)
- firmware_version = await self.pump_io.write_and_read_reply_async(fw_cmd)
- logger.info(
- f"Connected to Hamilton ML600 {self.name} - FW version: {firmware_version}!"
- )
-
- if hw_init:
- await self.initialize_pump(speed=init_speed)
-
- async def send_command_and_read_reply(
- self,
- command_template: Protocol1CommandTemplate,
- command_value="",
- argument_value="",
- ) -> str:
- """Sends a command based on its template by adding pump address and parameters, returns reply"""
- return await self.pump_io.write_and_read_reply_async(
- command_template.to_pump(self.address, command_value, argument_value)
- )
-
- def _validate_speed(self, speed_value: Optional[str]) -> str:
- """Given a speed (seconds/stroke) returns a valid value for it, and a warning if out of bounds."""
-
- # Validated speeds are used as command argument, with empty string being the default for None
- if speed_value is None:
- return ""
-
- speed = flowchem_ureg(speed_value)
-
- # Alert if out of bounds but don't raise exceptions, according to general philosophy.
- # Target flow rate too high
- if speed < flowchem_ureg("2 sec/stroke"):
- speed = flowchem_ureg("2 sec/stroke")
- warnings.warn(
- f"Desired speed ({speed}) is unachievable!"
- f"Set to {self._seconds_per_stroke_to_flowrate(speed)}"
- f"Wrong units? A bigger syringe is needed?"
- )
-
- # Target flow rate too low
- if speed > flowchem_ureg("3692 sec/stroke"):
- speed = flowchem_ureg("3692 sec/stroke")
- warnings.warn(
- f"Desired speed ({speed}) is unachievable!"
- f"Set to {self._seconds_per_stroke_to_flowrate(speed)}"
- f"Wrong units? A smaller syringe is needed?"
- )
-
- return str(round(speed.m_as("sec / stroke")))
-
- async def initialize_pump(self, speed: Optional[str] = None):
- """
- Initialize both syringe and valve
- speed: 2-3692 in seconds/stroke
- """
- init_cmd = Protocol1CommandTemplate(command="X", optional_parameter="S")
- return await self.send_command_and_read_reply(
- init_cmd, argument_value=self._validate_speed(speed)
- )
-
- async def initialize_valve(self):
- """Initialize valve only"""
- return await self.send_command_and_read_reply(
- Protocol1CommandTemplate(command="LX")
- )
-
- async def initialize_syringe(self, speed: Optional[str] = None):
- """
- Initialize syringe only
- speed: 2-3692 in seconds/stroke
- """
- init_syringe_cmd = Protocol1CommandTemplate(
- command="X1", optional_parameter="S"
- )
- return await self.send_command_and_read_reply(
- init_syringe_cmd, argument_value=self._validate_speed(speed)
- )
-
- def flowrate_to_seconds_per_stroke(self, flowrate: str):
- """
- Convert flow rates to steps per seconds
-
- To determine the volume dispensed per step the total syringe volume is divided by
- 48,000 steps. All Hamilton instrument syringes are designed with a 60 mm stroke
- length and the Microlab 600 is designed to move 60 mm in 48,000 steps. For
- example to dispense 9 mL from a 10 mL syringe you would determine the number of
- steps by multiplying 48000 steps (9 mL/10 mL) to get 43,200 steps.
- """
- flowrate = flowchem_ureg(flowrate)
- flowrate_in_steps_sec = flowrate * self._steps_per_ml
- seconds_per_stroke = (1 / flowrate_in_steps_sec).to("second/stroke")
-
- return self._validate_speed(str(seconds_per_stroke))
-
- def _seconds_per_stroke_to_flowrate(
- self, second_per_stroke: pint.Quantity
- ) -> float:
- """The inverse of flowrate_to_seconds_per_stroke(). Only internal use."""
- flowrate = 1 / (second_per_stroke * self._steps_per_ml)
- return flowrate.to("ml/min")
-
- def _volume_to_step_position(self, volume_w_units: str) -> int:
- """Converts a volume to a step position."""
- # noinspection PyArgumentEqualDefault
- volume = flowchem_ureg(volume_w_units)
- steps = volume * self._steps_per_ml
- return round(steps.m_as("steps")) + self._offset_steps
-
- async def _to_step_position(self, position: int, speed: str = ""):
- """Absolute move to step position."""
- abs_move_cmd = Protocol1CommandTemplate(command="M", optional_parameter="S")
- return await self.send_command_and_read_reply(
- abs_move_cmd, str(position), self._validate_speed(speed)
- )
-
- async def get_current_volume(self) -> str:
- """Return current syringe position in ml."""
- syringe_pos = await self.send_command_and_read_reply(
- Protocol1CommandTemplate(command="YQP")
- )
- current_steps = (int(syringe_pos) - self._offset_steps) * flowchem_ureg.step
- return str(current_steps / self._steps_per_ml)
-
- async def to_volume(self, target_volume: str, speed: str = ""):
- """Absolute move to volume provided."""
- await self._to_step_position(
- self._volume_to_step_position(target_volume), speed
- )
- logger.debug(f"Pump {self.name} set to volume {target_volume} at speed {speed}")
-
- async def pause(self):
- """Pause any running command."""
- return await self.send_command_and_read_reply(
- Protocol1CommandTemplate(command="K", execute_command=False)
- )
-
- async def resume(self):
- """Resume any paused command."""
- return await self.send_command_and_read_reply(
- Protocol1CommandTemplate(command="$", execute_command=False)
- )
-
- async def stop(self):
- """Stops and abort any running command."""
- await self.pause()
- return await self.send_command_and_read_reply(
- Protocol1CommandTemplate(command="V", execute_command=False)
- )
-
- async def wait_until_idle(self):
- """Returns when no more commands are present in the pump buffer."""
- logger.debug(f"ML600 pump {self.name} wait until idle...")
- while self.is_busy:
- time.sleep(0.1)
- logger.debug(f"...ML600 pump {self.name} idle now!")
-
- async def version(self) -> str:
- """Returns the current firmware version reported by the pump."""
- return await self.send_command_and_read_reply(
- Protocol1CommandTemplate(command="U")
- )
-
- async def is_idle(self) -> bool:
- """Checks if the pump is idle (actually check if the last command has ended)."""
- return (
- await self.send_command_and_read_reply(
- Protocol1CommandTemplate(command="F")
- )
- == "Y"
- )
-
- async def is_busy(self) -> bool:
- """Pump is not idle."""
- return not await self.is_idle()
-
- async def get_valve_position(self) -> ValvePositionName:
- """Represent the position of the valve: getter returns Enum, setter needs Enum."""
- valve_pos = await self.send_command_and_read_reply(
- Protocol1CommandTemplate(command="LQP")
- )
- return ML600.ValvePositionName(int(valve_pos))
-
- async def set_valve_position(
- self, target_position: ValvePositionName, wait_for_movement_end: bool = True
- ):
- """Set valve position. wait_for_movement_end is defaulted to True as it is a common mistake not to wait..."""
- valve_by_name_cw = Protocol1CommandTemplate(command="LP0")
- await self.send_command_and_read_reply(
- valve_by_name_cw, command_value=str(int(target_position))
- )
- logger.debug(f"{self.name} valve position set to {target_position.name}")
- if wait_for_movement_end:
- await self.wait_until_idle()
-
- async def get_return_steps(self) -> int:
- """Return steps' getter. Applied to the end of a downward syringe movement to removes mechanical slack."""
- steps = await self.send_command_and_read_reply(
- Protocol1CommandTemplate(command="YQN")
- )
- return int(steps)
-
- async def set_return_steps(self, target_steps: int):
- """Return steps' setter. Applied to the end of a downward syringe movement to removes mechanical slack."""
- set_return_steps_cmd = Protocol1CommandTemplate(command="YSN")
- await self.send_command_and_read_reply(
- set_return_steps_cmd, command_value=str(int(target_steps))
- )
-
- async def pickup(
- self,
- volume: str,
- from_valve: ValvePositionName,
- flowrate: str = "1 ml/min",
- wait: bool = False,
- ):
- """Get volume from valve specified at given flowrate."""
- cur_vol = flowchem_ureg(await self.get_current_volume())
- if (cur_vol + volume) > self._max_vol:
- warnings.warn(
- f"Cannot withdraw {volume} given the current syringe position {cur_vol} and a "
- f"syringe volume of {self.syringe_volume}"
- )
- return
-
- # Valve to position specified
- await self.set_valve_position(from_valve)
- # Move up to target volume
- await self.to_volume(
- str(cur_vol + volume),
- speed=self.flowrate_to_seconds_per_stroke(flowrate),
- )
-
- if wait:
- await self.wait_until_idle()
-
- async def deliver(
- self,
- volume: str,
- to_valve: ValvePositionName,
- flowrate: str,
- wait: bool = False,
- ):
- """Delivers volume to valve specified at given flow rate."""
- cur_vol = flowchem_ureg(await self.get_current_volume())
- if volume > cur_vol:
- warnings.warn(
- f"Cannot deliver {volume} given the current syringe position {cur_vol}!"
- )
- return
-
- # Valve to position specified
- await self.set_valve_position(to_valve)
- # Move up to target volume
- await self.to_volume(
- str(cur_vol - volume),
- speed=self.flowrate_to_seconds_per_stroke(flowrate),
- )
-
- if wait:
- await self.wait_until_idle()
-
- async def transfer(
- self,
- volume: str,
- from_valve: ValvePositionName,
- to_valve: ValvePositionName,
- flowrate_in: str = "1 ml/min",
- flowrate_out: str = "1 ml/min",
- wait: bool = False,
- ):
- """Move liquid from place to place."""
- await self.pickup(volume, from_valve, flowrate_in, wait=True)
- await self.deliver(volume, to_valve, flowrate_out, wait=wait)
-
- def get_router(self):
- """Creates an APIRouter for this object."""
- from fastapi import APIRouter
-
- router = APIRouter()
- router.add_api_route("/firmware-version", self.version, methods=["GET"])
- router.add_api_route("/initialize/pump", self.initialize_pump, methods=["PUT"])
- router.add_api_route(
- "/initialize/valve", self.initialize_valve, methods=["PUT"]
- )
- router.add_api_route(
- "/initialize/syringe", self.initialize_syringe, methods=["PUT"]
- )
- router.add_api_route("/pause", self.pause, methods=["PUT"])
- router.add_api_route("/resume", self.resume, methods=["PUT"])
- router.add_api_route("/resume", self.resume, methods=["PUT"])
- router.add_api_route("/stop", self.stop, methods=["PUT"])
- router.add_api_route("/version", self.stop, methods=["PUT"])
- router.add_api_route("/is-idle", self.is_idle, methods=["GET"])
- router.add_api_route("/is-busy", self.is_busy, methods=["GET"])
- router.add_api_route(
- "/valve/position", self.get_valve_position, methods=["GET"]
- )
- router.add_api_route(
- "/valve/position", self.set_valve_position, methods=["PUT"]
- )
- router.add_api_route(
- "/syringe/volume", self.get_current_volume, methods=["GET"]
- )
- router.add_api_route("/syringe/volume", self.to_volume, methods=["PUT"])
- router.add_api_route(
- "/syringe/return-steps", self.get_return_steps, methods=["GET"]
- )
- router.add_api_route(
- "/syringe/return-steps", self.set_return_steps, methods=["PUT"]
- )
- router.add_api_route("/pickup", self.pickup, methods=["PUT"])
- router.add_api_route("/deliver", self.deliver, methods=["PUT"])
- # router.add_api_route("/transfer", self.transfer, methods=["PUT"]) # Might go in timeout
-
- return router
-
-
-# class TwoPumpAssembly(Thread):
-# """
-# Thread to control two pumps and have them generating a continuous flow.
-# Note that the pumps should not be accessed directly when used in a TwoPumpAssembly!
-#
-# Notes: this needs to start a thread owned by the instance to control the pumps.
-# The async version of this being possibly simpler w/ tasks and callback :)
-# """
-#
-# def __init__(
-# self, pump1: ML600, pump2: ML600, target_flowrate: str, init_seconds: int = 10
-# ):
-# super(TwoPumpAssembly, self).__init__()
-# self._p1 = pump1
-# self._p2 = pump2
-# self.daemon = True
-# self.cancelled = threading.Event()
-# self._flowrate = ensure_quantity(target_flowrate, "ml/min")
-# logger = logging.getLogger(__name__).getChild("TwoPumpAssembly")
-# # How many seconds per stroke for first filling? application dependent, as fast as possible, but not too much.
-# self.init_secs = init_seconds
-#
-# # While in principle possible, using syringes of different volumes is discouraged, hence...
-# assert (
-# pump1.syringe_volume == pump2.syringe_volume
-# ), "Syringes w/ equal volume are needed for continuous flow!"
-#
-# async def initialize(self):
-# """ Initialize multi-pump """
-# await self._p1.initialize()
-# await self._p2.initialize()
-#
-# @property
-# def flowrate(self):
-# """ Returns/sets flowrate. """
-# return self._flowrate
-#
-# @flowrate.setter
-# def flowrate(self, target_flowrate):
-# if target_flowrate == 0:
-# warnings.warn(
-# "Cannot set flowrate to 0! Pump stopped instead, restart previous flowrate with resume!"
-# )
-# self.cancel()
-# else:
-# self._flowrate = target_flowrate
-#
-# # This will stop current movement, make wait_for_both_pumps() return and move on w/ updated speed
-# self._p1.stop()
-# self._p2.stop()
-#
-# async def wait_for_both_pumps(self):
-# """ Custom waiting method to wait a shorter time than normal (for better sync) """
-# while await self._p1.is_busy() or await self._p2.is_busy():
-# await asyncio.sleep(0.01) # 10ms sounds reasonable to me
-# logger.debug("Both pumps are ready!")
-#
-# def _speed(self):
-# speed = self._p1.flowrate_to_seconds_per_stroke(self._flowrate)
-# logger.debug(f"Speed calculated as {speed}")
-# return speed
-#
-# async def execute_stroke(
-# self, pump_full: ML600, pump_empty: ML600, speed_s_per_stroke: int
-# ):
-# """ Perform a cycle (1 syringe stroke) in the continuous-operation mode. See also run(). """
-# # Logic is a bit complex here to ensure pause-less pumping
-# # This needs the pump that withdraws to move faster than the pumping one. no way around.
-#
-# # First start pumping with the full syringe already prepared
-# pump_full.to_volume(0, speed=speed_s_per_stroke)
-# logger.debug("Pumping...")
-# # Then start refilling the empty one
-# pump_empty.set_valve_position(pump_empty.ValvePositionName.INPUT)
-# # And do that fast so that we finish refill before the pumping is over
-# pump_empty.to_volume(pump_empty.syringe_volume, speed=speed_s_per_stroke - 5)
-# pump_empty.wait_until_idle()
-# # This allows us to set the right pump position on the pump that was empty (not full and ready for next cycle)
-# pump_empty.set_valve_position(pump_empty.ValvePositionName.OUTPUT)
-# pump_full.wait_until_idle()
-#
-# def run(self):
-# """Overloaded Thread.run, runs the update
-# method once per every 10 milliseconds."""
-# # First initialize with init_secs speed...
-# self._p1.to_volume(self._p1.syringe_volume, speed=self.init_secs)
-# self._p1.wait_until_idle()
-# self._p1.valve_position = self._p1.ValvePositionName.OUTPUT
-# logger.info("Pumps initialized for continuous pumping!")
-#
-# while True:
-# while not self.cancelled.is_set():
-# self.execute_stroke(
-# self._p1, self._p2, speed_s_per_stroke=self._speed()
-# )
-# self.execute_stroke(
-# self._p2, self._p1, speed_s_per_stroke=self._speed()
-# )
-#
-# def cancel(self):
-# """ Cancel continuous-pumping assembly """
-# self.cancelled.set()
-# self._p1.stop()
-# self._p2.stop()
-#
-# def resume(self):
-# """ Resume continuous-pumping assembly """
-# self.cancelled.clear()
-#
-# def stop_and_return_solution_to_container(self):
-# """ Let´s not waste our precious stock solutions ;) """
-# self.cancel()
-# logger.info(
-# "Returning the solution currently loaded in the syringes back to the inlet.\n"
-# "Make sure the container is not removed yet!"
-# )
-# # Valve to input
-# self._p1.valve_position = self._p1.ValvePositionName.INPUT
-# self._p2.valve_position = self._p2.ValvePositionName.INPUT
-# self.wait_for_both_pumps()
-# # Volume to 0 with the init speed (supposedly safe for this application)
-# self._p1.to_volume(0, speed=self.init_secs)
-# self._p2.to_volume(0, speed=self.init_secs)
-# self.wait_for_both_pumps()
-# logger.info("Pump flushing completed!")
-
-
-if __name__ == "__main__":
- import asyncio
-
- conf = {
- "port": "COM12",
- "address": 1,
- "name": "test1",
- "syringe_volume": 5,
- }
- pump1 = ML600.from_config(**conf)
- asyncio.run(pump1.initialize_pump())
diff --git a/flowchem/components/devices/Hamilton/ML600_finder.py b/flowchem/components/devices/Hamilton/ML600_finder.py
deleted file mode 100644
index 14c6a181..00000000
--- a/flowchem/components/devices/Hamilton/ML600_finder.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""
-This module is used to discover the serial address of any ML600 connected to the PC.
-"""
-import asyncio
-
-import aioserial
-import serial.tools.list_ports
-from loguru import logger
-
-from flowchem.components.devices.Hamilton.ML600 import (
- HamiltonPumpIO,
- InvalidConfiguration,
-)
-
-
-def ml600_finder():
- """Try to initialize an ML600 on every available COM port."""
- port_available = [comport.device for comport in serial.tools.list_ports.comports()]
-
- # Ports connected to an ML600-looking device
- valid_ports = set()
-
- for serial_port in port_available:
- try:
- print(f"Looking for pump on {serial_port}...")
- link = HamiltonPumpIO(aioserial.AioSerial(url=serial_port, timeout=0.1))
- asyncio.run(link.initialize())
- logger.info(f"{link.num_pump_connected} pump(s) found on <{serial_port}>")
- valid_ports.add(serial_port)
- except InvalidConfiguration:
- logger.debug(f"No pump found on {serial_port}")
-
- return valid_ports
-
-
-if __name__ == "__main__":
- ml600_pumps = ml600_finder()
- if len(ml600_pumps) > 0:
- print(f"The following serial port are connected to ML600: {ml600_pumps}")
- else:
- print("No ML600 pump found")
diff --git a/flowchem/components/devices/Hamilton/__init__.py b/flowchem/components/devices/Hamilton/__init__.py
deleted file mode 100644
index d11be9e4..00000000
--- a/flowchem/components/devices/Hamilton/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-""" Hamilton devices """
-from .ML600 import ML600, HamiltonPumpIO
-
-__all__ = ["ML600", "HamiltonPumpIO"]
diff --git a/flowchem/components/devices/Harvard_Apparatus/Elite11_finder.py b/flowchem/components/devices/Harvard_Apparatus/Elite11_finder.py
deleted file mode 100644
index 5f974877..00000000
--- a/flowchem/components/devices/Harvard_Apparatus/Elite11_finder.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
-This module is used to discover the serial address of any ML600 connected to the PC.
-"""
-import serial.tools.list_ports
-from loguru import logger
-
-from flowchem.components.devices.Harvard_Apparatus.HA_elite11 import (
- HarvardApparatusPumpIO,
-)
-from flowchem.exceptions import InvalidConfiguration
-
-
-# noinspection PyProtectedMember
-def elite11_finder():
- """Try to initialize an Elite11 on every available COM port."""
- port_available = [comport.device for comport in serial.tools.list_ports.comports()]
-
- # Ports connected to an elite11-looking device
- valid_ports = set()
-
- for serial_port in port_available:
- try:
- print(f"Looking for pump on {serial_port}...")
- link = HarvardApparatusPumpIO(port=serial_port)
- link._serial.write("\r\n".encode("ascii"))
- if link._serial.readline() == b"\n":
- valid_ports.add(serial_port)
- logger.info(f"Pump found on <{serial_port}>")
- pump = link._serial.readline().decode("ascii")
- logger.info(f"Pump address is {pump[0:2]}!")
- print(f"Found a pump with address {pump[0:2]} on {serial_port}!")
- else:
- logger.debug(f"No pump found on {serial_port}")
- except InvalidConfiguration:
- pass
-
- return valid_ports
-
-
-if __name__ == "__main__":
- elite11_pumps = elite11_finder()
- if len(elite11_pumps) > 0:
- print(f"The following serial port are connected to Elite11: {elite11_pumps}")
- else:
- print("No Elite11 pump found")
diff --git a/flowchem/components/devices/Harvard_Apparatus/HA_elite11.py b/flowchem/components/devices/Harvard_Apparatus/HA_elite11.py
deleted file mode 100644
index 2c60f4b2..00000000
--- a/flowchem/components/devices/Harvard_Apparatus/HA_elite11.py
+++ /dev/null
@@ -1,863 +0,0 @@
-"""
-This module is used to control Harvard Apparatus Elite 11 syringe pump via the 11 protocol.
-"""
-
-from __future__ import annotations
-
-import asyncio
-import warnings
-from dataclasses import dataclass
-from enum import Enum
-from typing import List, Optional, Set, Tuple
-
-import aioserial
-from loguru import logger
-from pydantic import BaseModel
-
-from flowchem.components.stdlib import Pump
-from flowchem.exceptions import DeviceError, InvalidConfiguration
-from flowchem.units import flowchem_ureg
-
-
-def _parse_version(version_text: str) -> Tuple[int, int, int]:
- """Extract semver from Elite11 version string, e.g. '11 ELITE I/W Single 3.0.4"""
-
- numbers = version_text.split(" ")[-1]
- version_digits = numbers.split(".")
- return int(version_digits[0]), int(version_digits[1]), int(version_digits[2])
-
-
-class PumpInfo(BaseModel):
- """Detailed pump info."""
-
- pump_type: str
- pump_description: str
- infuse_only: bool
-
- @classmethod
- def parse_pumpstring(cls, metrics_text: List[str]):
- """Parse pump response string into model."""
- pump_type, pump_description, infuse_only = "", "", True
- for line in metrics_text:
- if line.startswith("Pump type "):
- pump_type = line[9:].strip()
- elif line.startswith("Pump type string"):
- pump_description = line[16:].strip()
- elif line.startswith("Direction"):
- if "withdraw" in line:
- infuse_only = False
- else:
- infuse_only = True
- return cls(
- pump_type=pump_type,
- pump_description=pump_description,
- infuse_only=infuse_only,
- )
-
-
-@dataclass
-class Protocol11Command:
- """Class representing a pump command and its expected reply"""
-
- command_string: str
- target_pump_address: int
- command_argument: str
-
- def compile(self) -> str:
- """
- Create actual command byte by prepending pump address to command.
- """
- assert 0 <= self.target_pump_address < 99
- return (
- str(self.target_pump_address)
- + self.command_string
- + " "
- + self.command_argument
- + "\r\n"
- )
-
-
-class PumpStatus(Enum):
- """Possible pump statuses, as defined by the reply prompt."""
-
- IDLE = ":"
- INFUSING = ">"
- WITHDRAWING = "<"
- TARGET_REACHED = "T"
- STALLED = "*"
-
-
-class HarvardApparatusPumpIO:
- """Setup with serial parameters, low level IO"""
-
- DEFAULT_CONFIG = {"timeout": 0.1, "baudrate": 115200}
-
- # noinspection PyPep8
- def __init__(self, port: str, **kwargs):
- # Merge default settings, including serial, with provided ones.
- configuration = dict(HarvardApparatusPumpIO.DEFAULT_CONFIG, **kwargs)
-
- try:
- self._serial = aioserial.AioSerial(port, **configuration)
- except aioserial.SerialException as serial_exception:
- logger.error(f"Cannot connect to the Pump on the port <{port}>")
- raise InvalidConfiguration(
- f"Cannot connect to the Pump on the port <{port}>"
- ) from serial_exception
-
- async def _write(self, command: Protocol11Command):
- """Writes a command to the pump"""
- command_msg = command.compile()
- try:
- await self._serial.write_async(command_msg.encode("ascii"))
- except aioserial.SerialException as serial_exception:
- raise InvalidConfiguration from serial_exception
- logger.debug(f"Sent {repr(command_msg)}!")
-
- async def _read_reply(self) -> List[str]:
- """Reads the pump reply from serial communication"""
- reply_string = []
-
- for line in await self._serial.readlines_async():
- reply_string.append(line.decode("ascii").strip())
- logger.debug(f"Received {repr(line)}!")
-
- # First line is usually empty, but some prompts such as T* actually leak into this line sometimes.
- reply_string.pop(0)
-
- # remove empty strings from reply_string
- reply_string = [x for x in reply_string if x]
-
- return reply_string
-
- @staticmethod
- def parse_response_line(line: str) -> Tuple[int, PumpStatus, str]:
- """Split a received line in its components: address, prompt and reply body"""
- assert len(line) >= 3
- pump_address = int(line[0:2])
- status = PumpStatus(line[2:3])
-
- # Target reached is the only two-character status
- if status is PumpStatus.TARGET_REACHED:
- return pump_address, status, line[4:]
- return pump_address, status, line[3:]
-
- @staticmethod
- def parse_response(
- response: List[str],
- ) -> Tuple[List[int], List[PumpStatus], List[str]]:
- """Aggregates address prompt and reply body from all the reply lines and return them."""
- parsed_lines = list(map(HarvardApparatusPumpIO.parse_response_line, response))
- # noinspection PyTypeChecker
- return zip(*parsed_lines) # type: ignore
-
- @staticmethod
- def check_for_errors(last_response_line, command_sent):
- """Further response parsing, checks for error messages"""
- if "Command error" in last_response_line:
- raise DeviceError(
- f"The command {command_sent} is invalid for pump {command_sent.target_pump_address}!"
- f"[Reply: {last_response_line}]"
- )
- if "Unknown command" in last_response_line:
- raise DeviceError(
- f"The command {command_sent} is unknown to pump {command_sent.target_pump_address}!"
- f"[Maybe a withdraw command has been used with an infuse only pump?]"
- f"[Reply: {last_response_line}]"
- )
- if "Argument error" in last_response_line:
- raise DeviceError(
- f"The command {command_sent} to pump {command_sent.target_pump_address} has an "
- f"invalid argument [Reply: {last_response_line}]"
- )
- if "Out of range" in last_response_line:
- raise DeviceError(
- f"The command {command_sent} to pump {command_sent.target_pump_address} has an "
- f"argument out of range! [Reply: {last_response_line}]"
- )
-
- def reset_buffer(self):
- """Reset input buffer before reading from serial. In theory not necessary if all replies are consumed..."""
- try:
- self._serial.reset_input_buffer()
- except aioserial.PortNotOpenError as port_not_open_error:
- raise InvalidConfiguration from port_not_open_error
-
- async def write_and_read_reply(
- self, command: Protocol11Command, return_parsed: bool = True
- ) -> List[str]:
- """Main PumpIO method. Sends a command to the pump, read the replies and returns it, optionally parsed.
-
- If unparsed reply is a List[str] with raw replies.
- If parsed reply is a List[str] w/ reply body (address and prompt removed from each line)"""
- self.reset_buffer()
- await self._write(command)
- response = await self._read_reply()
-
- if not response:
- raise InvalidConfiguration(
- f"No response received from pump, check pump address! "
- f"(Currently set to {command.target_pump_address})"
- )
-
- # Parse reply
- (
- pump_address,
- return_status,
- parsed_response,
- ) = HarvardApparatusPumpIO.parse_response(response)
-
- # Ensures that all the replies came from the target pump (this should always be the case)
- assert all(address == command.target_pump_address for address in pump_address)
-
- # Ensure no stall is present (this might happen, so let's raise an Exception w/ diagnostic text)
- if PumpStatus.STALLED in return_status:
- raise DeviceError("Pump stalled! Press display on pump to clear error :(")
-
- HarvardApparatusPumpIO.check_for_errors(
- last_response_line=response[-1], command_sent=command
- )
-
- return parsed_response if return_parsed else response
-
- @property
- def name(self) -> Optional[str]:
- """This is used to provide a nice-looking default name to pumps based on their serial connection."""
- try:
- return self._serial.name
- except AttributeError:
- return None
-
- def autodetermine_address(self) -> int:
- """Autodetermine pump address based on response received."""
- self._serial.write("\r\n".encode("ascii"))
- self._serial.readline()
- prompt = self._serial.readline()
- valid_status = [status.value for status in PumpStatus]
- address = 0 if prompt[0:2].decode() in valid_status else int(prompt[0:2])
- logger.debug(f"Address autodetected as {address}")
- return address
-
-
-# noinspection SpellCheckingInspection
-class Elite11Commands:
-
- """Holds the commands and arguments. Nota bene: Pump needs to be in Quick Start mode, which can be achieved from
- the display interface"""
-
- # collected commands
- # Methods can be programmed onto the pump and their execution remotely triggered.
- # No support is provided to such feature as "explicit is better than implicit", i.e. the same result can be obtained
- # with a sequence of Elite11Commands, with the advantage of ensuring code reproducibility (i.e. no specific
- # configuration is needed on the pump side)
- #
- # Other methods not included: dim display, usb echo, footswitch, poll, version (verbose ver), input,
- # output (if pin state high or low) and time commands
-
- EMPTY_MESSAGE = " "
- VERSION = "VER"
-
- # RUN commands (no parameters, start movement in same direction/reverse direction/infuse/withdraw respectively)
- RUN = "run"
- REVERSE_RUN = "rrun"
- INFUSE = "irun"
- WITHDRAW = "wrun"
-
- # STOP movement
- STOP = "stp"
-
- # Max applied force (in percent)
- FORCE = "FORCE"
-
- # Syringe diameter
- DIAMETER = "diameter"
-
- METRICS = "metrics"
- CURRENT_MOVING_RATE = "crate"
-
- # RAMP Ramping commands (infuse or withdraw)
- # setter: iramp [{start rate} {start units} {end rate} {end units} {ramp time in seconds}]
- INFUSE_RAMP = "iramp"
- GET_WITHDRAW_RAMP = "wramp"
-
- # RATE
- # returns or set rate irate [max | min | lim | {rate} {rate units}]
- INFUSE_RATE = "irate"
- INFUSE_RATE_LIMITS = "irate lim"
- WITHDRAW_RATE = "wrate"
- WITHDRAW_RATE_LIMITS = "wrate lim"
-
- # VOLUME
- SYRINGE_VOLUME = "svolume"
- INFUSED_VOLUME = "ivolume"
- WITHDRAWN_VOLUME = "wvolume"
- TARGET_VOLUME = "tvolume"
-
- # CLEAR VOLUME
- CLEAR_INFUSED_VOLUME = "civolume"
- CLEAR_WITHDRAWN_VOLUME = "cwvolume"
- CLEAR_INFUSED_WITHDRAWN_VOLUME = "cvolume"
- CLEAR_TARGET_VOLUME = "ctvolume"
-
-
-# noinspection PyProtectedMember
-class Elite11InfuseOnly(Pump):
- """
- Controls Harvard Apparatus Elite11 syringe pumps.
-
- The same protocol (Protocol11) can be used on other HA pumps, but is untested.
- Several pumps can be daisy-chained on the same serial connection, if so address 0 must be the first one.
- Read the manufacturer manual for more details.
- """
-
- # This class variable is used for daisy chains (i.e. multiple pumps on the same serial connection). Details below.
- _io_instances: Set[HarvardApparatusPumpIO] = set()
- # The mutable object (a set) as class variable creates a shared state across all the instances.
- # When several pumps are daisy-chained on the same serial port, they need to all access the same Serial object,
- # because access to the serial port is exclusive by definition (also locking there ensure thread safe operations).
- # FYI it is a borg idiom https://www.oreilly.com/library/view/python-cookbook/0596001673/ch05s23.html
-
- metadata = {
- "author": [
- {
- "first_name": "Jakob",
- "last_name": "Wolf",
- "email": "jakob.wolf@mpikg.mpg.de",
- "institution": "Max Planck Institute of Colloids and Interfaces",
- "github_username": "JB-Wolf",
- },
- {
- "first_name": "Dario",
- "last_name": "Cambie",
- "email": "dario.cambie@mpikg.mpg.de",
- "institution": "Max Planck Institute of Colloids and Interfaces",
- "github_username": "dcambie",
- },
- ],
- "stability": "beta",
- "supported": True,
- }
-
- def __init__(
- self,
- pump_io: HarvardApparatusPumpIO,
- diameter: str,
- syringe_volume: str,
- address: Optional[int] = None,
- name: Optional[str] = None,
- ):
- """Query model and version number of firmware to check pump is
- OK. Responds with a load of stuff, but the last three characters
- are the prompt XXY, where XX is the address and Y is pump status.
- The status can be one of the three: [":", ">" "<"] respectively
- when stopped, running forwards (pumping), or backwards (withdrawing).
- The prompt is used to confirm that the address is correct.
- This acts as a check to see that the pump is connected and working."""
-
- self.name = f"Pump {pump_io.name}:{address}" if name is None else name
- super().__init__(name)
-
- self.pump_io = pump_io
- Elite11InfuseOnly._io_instances.add(self.pump_io) # See above for details.
-
- self.address: int = address if address is not None else None # type: ignore
- self._version = None # Set in initialize
-
- # diameter and syringe volume - these will be set in initialize() - check values here though.
- if diameter is None:
- raise InvalidConfiguration(
- "Please provide the syringe diameter explicitly!\nThis prevents errors :)"
- )
- self._diameter = diameter
-
- if syringe_volume is None:
- raise InvalidConfiguration(
- "Please provide the syringe volume explicitly!\nThis prevents errors :)"
- )
- self._syringe_volume = syringe_volume
-
- @classmethod
- def from_config(
- cls,
- port: str,
- diameter: str,
- syringe_volume: str,
- address: int = None,
- name: str = None,
- **serial_kwargs,
- ):
- """Programmatic instantiation from configuration
-
- Many pump can be present on the same serial port with different addresses.
- This shared list of PumpIO objects allow shared state in a borg-inspired way, avoiding singletons
- This is only relevant to programmatic instantiation, i.e. when from_config() is called per each pump from a
- config file, as it is the case in the HTTP server.
- Pump_IO() manually instantiated are not accounted for.
- """
- pumpio = None
- for obj in Elite11InfuseOnly._io_instances:
- if obj._serial.port == port:
- pumpio = obj
- break
-
- # If not existing serial object are available for the port provided, create a new one
- if pumpio is None:
- pumpio = HarvardApparatusPumpIO(port, **serial_kwargs)
-
- return cls(
- pumpio,
- address=address,
- name=name,
- diameter=diameter,
- syringe_volume=syringe_volume,
- )
-
- async def initialize(self):
- """Ensure a valid connection with the pump has been established and sets parameters."""
- # Autodetect address if none provided
- print(f"THE ASDDRESS is {self.address=}")
- if self.address is None:
- self.address = self.pump_io.autodetermine_address()
-
- try:
- await self.stop()
- except IndexError as index_e:
- raise InvalidConfiguration(
- f"Check pump address! Currently {self.address=}"
- ) from index_e
-
- await self.set_syringe_diameter(self._diameter)
- await self.set_syringe_volume(self._syringe_volume)
-
- logger.info(
- f"Connected to pump '{self.name}' on port {self.pump_io.name}:{self.address}!"
- )
-
- # makes sure that a 'clean' pump is initialized.
- self._version = _parse_version(await self.version())
-
- if self._version[0] >= 3:
- await self.clear_volumes()
-
- async def _send_command_and_read_reply(
- self, command: str, parameter="", parse=True
- ) -> str:
- """Sends a command based on its template and return the corresponding reply as str"""
-
- cmd = Protocol11Command(
- command_string=command,
- target_pump_address=self.address,
- command_argument=parameter,
- )
- reply = await self.pump_io.write_and_read_reply(cmd, return_parsed=parse)
- return reply[0]
-
- async def _send_command_and_read_reply_multiline(
- self, command: str, parameter="", parse=True
- ) -> List[str]:
- """Sends a command based on its template and return the corresponding reply as str"""
-
- cmd = Protocol11Command(
- command_string=command,
- target_pump_address=self.address,
- command_argument=parameter,
- )
- return await self.pump_io.write_and_read_reply(cmd, return_parsed=parse)
-
- async def _bound_rate_to_pump_limits(self, rate: str) -> float:
- """Bound the rate provided to pump's limit. These are function of the syringe diameter.
-
- NOTE: Infusion and withdraw limits are equal!"""
- # Get current pump limits (those are function of the syringe diameter)
- limits_raw = await self._send_command_and_read_reply(
- Elite11Commands.INFUSE_RATE_LIMITS
- )
-
- # Lower limit usually expressed in nl/min so unit-aware quantities are needed
- lower_limit, upper_limit = map(flowchem_ureg, limits_raw.split(" to "))
-
- # Also add units to the provided rate
- set_rate = flowchem_ureg(rate)
-
- # Bound rate to acceptance range
- if set_rate < lower_limit:
- warnings.warn(
- f"The requested rate {rate} is lower than the minimum possible ({lower_limit})!"
- f"Setting rate to {lower_limit} instead!"
- )
- set_rate = lower_limit
-
- if set_rate > upper_limit:
- warnings.warn(
- f"The requested rate {rate} is higher than the maximum possible ({upper_limit})!"
- f"Setting rate to {upper_limit} instead!"
- )
- set_rate = upper_limit
-
- return set_rate.to("ml/min").magnitude
-
- async def version(self) -> str:
- """Returns the current firmware version reported by the pump"""
- return await self._send_command_and_read_reply(
- Elite11Commands.VERSION
- ) # '11 ELITE I/W Single 3.0.4
-
- async def get_status(self) -> PumpStatus:
- """Empty message to trigger a new reply and evaluate connection and pump current status via reply prompt"""
- status = await self._send_command_and_read_reply(
- Elite11Commands.EMPTY_MESSAGE, parse=False
- )
- return PumpStatus(status[2:3])
-
- async def is_moving(self) -> bool:
- """Evaluate prompt for current status, i.e. moving or not"""
- prompt = await self.get_status()
- return prompt in (PumpStatus.INFUSING, PumpStatus.WITHDRAWING)
-
- async def is_idle(self) -> bool:
- """Returns true if idle."""
- return not await self.is_moving()
-
- async def get_syringe_volume(self) -> str:
- """Returns the syringe volume as str w/ units."""
- return await self._send_command_and_read_reply(
- Elite11Commands.SYRINGE_VOLUME
- ) # e.g. '100 ml'
-
- async def set_syringe_volume(self, volume_w_units: str = None):
- """Sets the syringe volume in ml.
-
- :param volume_w_units: the volume of the syringe.
- """
- volume = flowchem_ureg(volume_w_units)
- await self._send_command_and_read_reply(
- Elite11Commands.SYRINGE_VOLUME, parameter=f"{volume.m_as('ml'):.15f} m"
- )
-
- async def run(self):
- """Activates pump, runs in the previously set direction."""
-
- if await self.is_moving():
- warnings.warn("Cannot start pump: already moving!")
- return
-
- await self._send_command_and_read_reply(Elite11Commands.RUN)
- logger.info("Pump movement started! (direction unspecified)")
-
- async def infuse_run(self):
- """Activates pump, runs in infuse mode."""
- if await self.is_moving():
- warnings.warn("Cannot start pump: already moving!")
- return
-
- await self._send_command_and_read_reply(Elite11Commands.INFUSE)
- logger.info("Pump movement started in infuse direction!")
-
- async def stop(self):
- """stops pump"""
- await self._send_command_and_read_reply(Elite11Commands.STOP)
- logger.info("Pump stopped")
-
- async def wait_until_idle(self):
- """Wait until the pump is no more moving"""
- while await self.is_moving():
- await asyncio.sleep(0.05)
-
- async def get_infusion_rate(self) -> str:
- """Returns the infusion rate as str w/ units"""
- return await self._send_command_and_read_reply(
- Elite11Commands.INFUSE_RATE
- ) # e.g. '0.2 ml/min'
-
- async def set_infusion_rate(self, rate: str):
- """Sets the infusion rate"""
- set_rate = await self._bound_rate_to_pump_limits(rate=rate)
- await self._send_command_and_read_reply(
- Elite11Commands.INFUSE_RATE, parameter=f"{set_rate:.10f} m/m"
- )
-
- async def get_infused_volume(self) -> str:
- """Return infused volume as string w/ units"""
- return await self._send_command_and_read_reply(Elite11Commands.INFUSED_VOLUME)
-
- async def clear_infused_volume(self):
- """Reset the pump infused volume counter to 0"""
- if self._version[0] < 3:
- warnings.warn("Command not supported by pump, update firmware!")
- return
- await self._send_command_and_read_reply(Elite11Commands.CLEAR_INFUSED_VOLUME)
-
- async def clear_volumes(self):
- """Set all pump volumes to 0"""
- await self.set_target_volume("0 ml")
- await self.clear_infused_volume()
-
- async def get_force(self):
- """
- Pump force, in percentage.
- Manufacturer suggested values are:
- stainless steel: 100%
- plastic syringes: 50% if volume <= 5 ml else 100%
- glass/glass: 30% if volume <= 20 ml else 50%
- glass/plastic: 30% if volume <= 250 ul, 50% if volume <= 5ml else 100%
- """
- percent = await self._send_command_and_read_reply(Elite11Commands.FORCE)
- return int(percent[:-1])
-
- async def set_force(self, force_percent: float):
- """Sets the pump force, see `Elite11.get_force()` for suggested values."""
- await self._send_command_and_read_reply(
- Elite11Commands.FORCE, parameter=str(int(force_percent))
- )
-
- async def get_syringe_diameter(self) -> str:
- """Syringe diameter in mm. This can be set in the interval 1 mm to 33 mm"""
- return await self._send_command_and_read_reply(Elite11Commands.DIAMETER)
-
- async def set_syringe_diameter(self, diameter_w_units: str):
- """
- Set syringe diameter. This can be set in the interval 1 mm to 33 mm
- """
- diameter = flowchem_ureg(diameter_w_units)
- if not 1 * flowchem_ureg.mm <= diameter <= 33 * flowchem_ureg.mm:
- warnings.warn(
- f"Diameter provided ({diameter}) is not valid, ignored! [Accepted range: 1-33 mm]"
- )
- return
-
- await self._send_command_and_read_reply(
- Elite11Commands.DIAMETER, parameter=f"{diameter.to('mm').magnitude:.4f} mm"
- )
-
- async def get_current_flowrate(self) -> str:
- """
- If pump moves, this returns the current moving rate. If not running empty string.
- :return: current moving rate
- """
- if await self.is_moving():
- return await self._send_command_and_read_reply(
- Elite11Commands.CURRENT_MOVING_RATE
- )
- warnings.warn("Pump is not moving, cannot provide moving rate!")
- return ""
-
- async def get_target_volume(self) -> str:
- """Returns target volume or a falsy empty string if not set."""
-
- target_vol = await self._send_command_and_read_reply(
- Elite11Commands.TARGET_VOLUME
- )
- if "Target volume not set" in target_vol:
- return ""
- return target_vol
-
- async def set_target_volume(self, volume: str):
- """
- Sets target volume in ml. If the volume is set to 0, the target is cleared.
- """
- target_volume = flowchem_ureg(volume)
- if target_volume.magnitude == 0:
- await self._send_command_and_read_reply(Elite11Commands.CLEAR_TARGET_VOLUME)
- else:
- set_vol = await self._send_command_and_read_reply(
- Elite11Commands.TARGET_VOLUME,
- parameter=f"{target_volume.m_as('ml')} m",
- )
- if "Argument error" in set_vol:
- warnings.warn(
- f"Cannot set target volume of {target_volume} with a "
- f"{self.get_syringe_volume()} syringe!"
- )
-
- async def pump_info(self) -> PumpInfo:
- """Returns much info
-
- e.g.
- ('Pump type Pump 11',
- 'Pump type string 11 ELITE I/W Single',
- 'Display type Sharp',
- 'Steps per rev 400',
- 'Gear ratio 1:1',
- 'Pulley ratio 2.4:1',
- 'Lead screw 24 threads per inch',
- 'Microstepping 16 microsteps per step',
- 'Low speed limit 27 seconds',
- 'High speed limit 26 microseconds',
- 'Motor polarity Reverse',
- 'Min syringe size 0.1 mm',
- 'Max syringe size 33 mm',
- 'Min raw force % 20%',
- 'Max raw force % 80%',
- 'Encoder 100 lines',
- 'Direction Infuse/withdraw',
- 'Programmable Yes',
- 'Limit switches No',
- 'Command set None', '')
- """
- parsed_multiline_response = await self._send_command_and_read_reply_multiline(
- Elite11Commands.METRICS
- )
- return PumpInfo.parse_pumpstring(parsed_multiline_response)
-
- def get_router(self):
- """Creates an APIRouter for this object."""
- from fastapi import APIRouter
-
- router = APIRouter()
- router.add_api_route(
- "/parameters/syringe-volume", self.get_syringe_volume, methods=["GET"]
- )
- router.add_api_route(
- "/parameters/syringe-volume", self.set_syringe_volume, methods=["PUT"]
- )
- router.add_api_route("/parameters/force", self.get_force, methods=["PUT"])
- router.add_api_route("/parameters/force", self.set_force, methods=["PUT"])
- router.add_api_route("/run", self.run, methods=["PUT"])
- router.add_api_route("/run/infuse", self.infuse_run, methods=["PUT"])
- router.add_api_route("/stop", self.stop, methods=["PUT"])
- router.add_api_route("/infusion-rate", self.get_infusion_rate, methods=["GET"])
- router.add_api_route("/infusion-rate", self.set_infusion_rate, methods=["PUT"])
- router.add_api_route("/info/version", self.version, methods=["GET"])
- router.add_api_route(
- "/info/status", self.get_status, methods=["GET"], response_model=PumpStatus
- )
- router.add_api_route("/info/is-moving", self.is_moving, methods=["GET"])
- router.add_api_route(
- "/info/current-flowrate", self.get_current_flowrate, methods=["GET"]
- )
- router.add_api_route(
- "/info/infused-volume", self.get_infused_volume, methods=["GET"]
- )
- router.add_api_route(
- "/info/reset-infused-volume", self.clear_infused_volume, methods=["PUT"]
- )
- router.add_api_route("/info/reset-all", self.clear_volumes, methods=["GET"])
-
- return router
-
-
-# noinspection PyProtectedMember
-class Elite11InfuseWithdraw(Elite11InfuseOnly):
- """
- Controls Harvard Apparatus Elite11 syringe pumps - INFUSE AND WITHDRAW.
- """
-
- def __init__(
- self,
- pump_io: HarvardApparatusPumpIO,
- diameter: str,
- syringe_volume: str,
- address: Optional[int] = None,
- name: Optional[str] = None,
- ):
- """Query model and version number of firmware to check pump is
- OK. Responds with a load of stuff, but the last three characters
- are the prompt XXY, where XX is the address and Y is pump status.
- The status can be one of the three: [":", ">" "<"] respectively
- when stopped, running forwards (pumping), or backwards (withdrawing).
- The prompt is used to confirm that the address is correct.
- This acts as a check to see that the pump is connected and working."""
-
- super().__init__(pump_io, diameter, syringe_volume, address, name)
-
- async def initialize(self):
- """Ensure a valid connection with the pump has been established and sets parameters."""
- await super().initialize()
-
- # Additionally, ensure pump support withdrawing upon initialization
- pump_info = await self.pump_info()
- assert not pump_info.infuse_only
-
- async def inverse_run(self):
- """Activates pump, runs opposite to previously set direction."""
- if await self.is_moving():
- warnings.warn("Cannot start pump: already moving!")
- return
-
- await self._send_command_and_read_reply(Elite11Commands.REVERSE_RUN)
- logger.info("Pump movement started in reverse direction!")
-
- async def withdraw_run(self):
- """Activates pump, runs in withdraw mode."""
- if await self.is_moving():
- warnings.warn("Cannot start pump: already moving!")
- return
-
- await self._send_command_and_read_reply(Elite11Commands.WITHDRAW)
-
- logger.info("Pump movement started in withdraw direction!")
-
- async def get_withdraw_rate(self) -> str:
- """Returns the infusion rate as a string w/ units"""
- return await self._send_command_and_read_reply(Elite11Commands.WITHDRAW_RATE)
-
- async def set_withdraw_rate(self, rate: str):
- """Sets the infusion rate"""
- set_rate = await self._bound_rate_to_pump_limits(rate=rate)
- await self._send_command_and_read_reply(
- Elite11Commands.WITHDRAW_RATE, parameter=f"{set_rate} m/m"
- )
-
- async def get_withdrawn_volume(self) -> str:
- """Returns the withdrawn volume from the last clear_*_volume() command, according to the pump"""
- return await self._send_command_and_read_reply(Elite11Commands.WITHDRAWN_VOLUME)
-
- async def clear_withdrawn_volume(self):
- """Reset the pump withdrawn volume counter to 0"""
- await self._send_command_and_read_reply(Elite11Commands.CLEAR_WITHDRAWN_VOLUME)
-
- async def clear_volumes(self):
- """Set all pump volumes to 0"""
- await self.set_target_volume("0 ml")
- await self.clear_infused_volume()
- await self.clear_withdrawn_volume()
-
- def get_router(self):
- router = super().get_router()
- # Creates an APIRouter for this object.
- router.add_api_route("/run/inverse", self.inverse_run, methods=["PUT"])
- router.add_api_route("/run/withdraw", self.withdraw_run, methods=["PUT"])
- router.add_api_route("/withdraw-rate", self.get_withdraw_rate, methods=["GET"])
- router.add_api_route("/withdraw-rate", self.set_withdraw_rate, methods=["PUT"])
- router.add_api_route(
- "/info/withdrawn-volume", self.get_withdrawn_volume, methods=["GET"]
- )
- router.add_api_route(
- "/info/reset-withdrawn", self.clear_withdrawn_volume, methods=["PUT"]
- )
-
- return router
-
- async def __aenter__(self):
- await self.initialize()
- return self
-
- async def __aexit__(self, exc_type, exc_value, traceback):
- await self.stop()
-
- async def _update(self):
- """Actuates flow rate changes."""
- if self.rate == 0:
- await self.stop()
- else:
- await self.set_infusion_rate(str(self.rate))
- await self.infuse_run()
-
-
-if __name__ == "__main__":
- pump = Elite11InfuseOnly.from_config(
- port="COM4", syringe_volume="10 ml", diameter="10 mm"
- )
-
- async def main():
- """Test function"""
- await pump.initialize()
- # assert await pump.get_infused_volume() == 0
- await pump.set_syringe_diameter("30 mm")
- await pump.set_infusion_rate("0.1 ml/min")
- await pump.set_target_volume("0.05 ml")
- await pump.infuse_run()
- await asyncio.sleep(2)
- await pump.pump_info()
-
- asyncio.run(main())
diff --git a/flowchem/components/devices/Harvard_Apparatus/__init__.py b/flowchem/components/devices/Harvard_Apparatus/__init__.py
deleted file mode 100644
index 800fdbf0..00000000
--- a/flowchem/components/devices/Harvard_Apparatus/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-""" Harvard Apparatus devices """
-from .HA_elite11 import Elite11InfuseOnly, Elite11InfuseWithdraw, HarvardApparatusPumpIO
-
-__all__ = ["Elite11InfuseOnly", "Elite11InfuseWithdraw", "HarvardApparatusPumpIO"]
diff --git a/flowchem/components/devices/Huber/__init__.py b/flowchem/components/devices/Huber/__init__.py
deleted file mode 100644
index 6d99cc2e..00000000
--- a/flowchem/components/devices/Huber/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-""" Huber devices """
-from .huberchiller import HuberChiller, PBCommand
-
-__all__ = ["HuberChiller", "PBCommand"]
diff --git a/flowchem/components/devices/Huber/huberchiller.py b/flowchem/components/devices/Huber/huberchiller.py
deleted file mode 100644
index c2f8beb0..00000000
--- a/flowchem/components/devices/Huber/huberchiller.py
+++ /dev/null
@@ -1,552 +0,0 @@
-"""
-Driver for Huber chillers.
-"""
-import asyncio
-import warnings
-from dataclasses import dataclass
-from typing import Dict, List, Optional
-
-import aioserial
-import pint
-from loguru import logger
-
-from flowchem.components.properties import TempControl
-from flowchem.exceptions import DeviceError, InvalidConfiguration
-from flowchem.units import flowchem_ureg
-
-
-@dataclass
-class PBCommand:
- """Class representing a PBCommand"""
-
- command: str
-
- def to_chiller(self) -> bytes:
- """Validate and encode to bytes array to be transmitted."""
- self.validate()
- return self.command.encode("ascii")
-
- def validate(self):
- """Check command structure to be compliant with PB format"""
- if len(self.command) == 8:
- self.command += "\r\n"
- # 10 characters
- assert len(self.command) == 10
- # Starts with {
- assert self.command[0] == "{"
- # M for master (commands) S for slave (replies).
- assert self.command[1] in ("M", "S")
- # Address, i.e. the desired function. Hex encoded.
- assert 0 <= int(self.command[2:4], 16) < 256
- # Value
- assert self.command[4:8] == "****" or 0 <= int(self.command[4:8], 16) <= 65536
- # EOL
- assert self.command[8:10] == "\r\n"
-
- @property
- def data(self) -> str:
- """Data portion of PBCommand."""
- return self.command[4:8]
-
- def parse_temperature(self) -> str:
- """Parse a device temp from hex string to celsius float [two's complement 16-bit signed hex, see manual]"""
- temp = (
- (int(self.data, 16) - 65536) / 100
- if int(self.data, 16) > 32767
- else (int(self.data, 16)) / 100
- )
- # -151 used for invalid temperatures
- if temp == -151:
- return ""
- return str(flowchem_ureg(f"{temp} °C"))
-
- def parse_integer(self) -> int:
- """Parse a device reply from hexadecimal string to base 10 integers."""
- return int(self.data, 16)
-
- def parse_rpm(self) -> str:
- """Parse a device reply from hexadecimal string to rpm."""
- return str(flowchem_ureg(f"{self.parse_integer()} rpm"))
-
- def parse_bits(self) -> List[bool]:
- """Parse a device reply from hexadecimal string to 16 constituting bits."""
- bits = f"{int(self.data, 16):016b}"
- return [bool(int(x)) for x in bits]
-
- def parse_boolean(self):
- """Parse a device reply from hexadecimal string (0x0000 or 0x0001) to boolean."""
- return self.parse_integer() == 1
-
- def parse_status1(self) -> Dict[str, bool]:
- """Parse response to status1 command and returns dict"""
- bits = self.parse_bits()
- return dict(
- temp_ctl_is_process=bits[0],
- circulation_active=bits[1],
- refrigerator_on=bits[2],
- temp_is_process=bits[3],
- circulating_pump=bits[4],
- cooling_power_available=bits[5],
- tkeylock=bits[6],
- is_pid_auto=bits[7],
- error=bits[8],
- warning=bits[9],
- int_temp_mode=bits[10],
- ext_temp_mode=bits[11],
- dv_e_grade=bits[12],
- power_failure=bits[13],
- freeze_protection=bits[14],
- )
-
- def parse_status2(self) -> Dict[str, bool]:
- """Parse response to status2 command and returns dict. See manufacturer docs for more info"""
- bits = self.parse_bits()
- return dict(
- controller_is_external=bits[0],
- drip_tray_full=bits[5],
- venting_active=bits[7],
- venting_successful=bits[8],
- venting_monitored=bits[9],
- )
-
-
-class HuberChiller(TempControl):
- """
- Control class for Huber chillers.
- """
-
- def __init__(self, aio: aioserial.AioSerial, name=None):
- super().__init__(name)
- self._serial = aio
-
- @classmethod
- def from_config(cls, port, name=None, **serial_kwargs):
- """
- Create instance from config dict. Used by server to initialize obj from config.
-
- Only required parameter is 'port'. Optional 'loop' + others (see AioSerial())
- """
- try:
- serial_object = aioserial.AioSerial(port, **serial_kwargs)
- except aioserial.SerialException as serial_exception:
- raise InvalidConfiguration(
- f"Cannot connect to the HuberChiller on the port <{port}>"
- ) from serial_exception
-
- return cls(serial_object, name)
-
- async def initialize(self):
- """Ensure the connection w/ device is working."""
- serial_num = await self.serial_number()
- if serial_num == 0:
- raise DeviceError("No reply received from Huber Chiller!")
- logger.debug(f"Connected with Huber Chiller S/N {serial_num}")
-
- async def send_command_and_read_reply(self, command: str) -> str:
- """Sends a command to the chiller and reads the reply.
-
- :param command: string to be transmitted
- :return: reply received
- """
- # Send command. Using PBCommand ensure command validation, see PBCommand.to_chiller()
- pb_command = PBCommand(command.upper())
- await self._serial.write_async(pb_command.to_chiller())
- logger.debug(f"Command {command[0:8]} sent to chiller!")
-
- # Receive reply and return it after decoding
- try:
- reply = await asyncio.wait_for(self._serial.readline_async(), 1)
- except asyncio.TimeoutError:
- warnings.warn(
- "No reply received. Likely the command is not supported by the hardware!"
- )
- logger.error("No reply received")
- return command.replace("M", "S").replace(
- "****", "0000"
- ) # Fake reply to keep going
-
- logger.debug(f"Reply {reply[0:8].decode('ascii')} received")
- return reply.decode("ascii")
-
- async def get_temperature_setpoint(self) -> str:
- """Returns the set point used by temperature controller. Internal if not probe, otherwise process temp."""
- reply = await self.send_command_and_read_reply("{M00****")
- return PBCommand(reply).parse_temperature()
-
- async def set_temperature_setpoint(self, temp: str):
- """Set the set point used by temperature controller. Internal if not probe, otherwise process temp."""
- min_t = flowchem_ureg(await self.min_setpoint())
- max_t = flowchem_ureg(await self.max_setpoint())
- temp = flowchem_ureg(temp)
-
- if temp > max_t:
- temp = max_t
- warnings.warn(
- f"Temperature requested {temp} is out of range [{min_t} - {max_t}] for HuberChiller {self}!"
- f"Setting to {max_t} instead."
- )
- if temp < min_t:
- temp = min_t
- warnings.warn(
- f"Temperature requested {temp} is out of range [{min_t} - {max_t}] for HuberChiller {self}!"
- f"Setting to {min_t} instead."
- )
-
- await self.send_command_and_read_reply("{M00" + self._temp_to_string(temp))
-
- async def internal_temperature(self) -> str:
- """Returns internal temp (bath temperature)."""
- reply = await self.send_command_and_read_reply("{M01****")
- return PBCommand(reply).parse_temperature()
-
- async def process_temperature(self) -> str:
- """Returns the current process temperature. If not T probe, the device returns -151, here parsed as None."""
- reply = await self.send_command_and_read_reply("{M3A****")
- return PBCommand(reply).parse_temperature()
-
- async def return_temperature(self) -> str:
- """Returns the temp of the thermal fluid flowing back to the device."""
- reply = await self.send_command_and_read_reply("{M02****")
- return PBCommand(reply).parse_temperature()
-
- async def pump_pressure(self) -> str:
- """Return pump pressure in mbar (note that you probably want barg, i.e. to remove 1 bar)"""
- reply = await self.send_command_and_read_reply("{M03****")
- pressure = PBCommand(reply).parse_integer()
- return str(flowchem_ureg(f"{pressure} mbar"))
-
- async def current_power(self) -> str:
- """Returns the current power in Watts (negative for cooling, positive for heating)."""
- reply = await self.send_command_and_read_reply("{M04****")
- power = PBCommand(reply).parse_integer()
- return str(flowchem_ureg(f"{power} watt"))
-
- async def status(self) -> Dict[str, bool]:
- """Returns the info contained in vstatus1 as dict."""
- reply = await self.send_command_and_read_reply("{M0A****")
- return PBCommand(reply).parse_status1()
-
- async def status2(self) -> Dict[str, bool]:
- """Returns the info contained in vstatus2 as dict."""
- reply = await self.send_command_and_read_reply("{M3C****")
- return PBCommand(reply).parse_status2()
-
- async def is_temperature_control_active(self) -> bool:
- """Returns whether temperature control is active or not."""
- reply = await self.send_command_and_read_reply("{M14****")
- return PBCommand(reply).parse_boolean()
-
- async def start_temperature_control(self):
- """Starts temperature control, i.e. start operation."""
- await self.send_command_and_read_reply("{M140001")
-
- async def stop_temperature_control(self):
- """Stops temperature control, i.e. stop operation."""
- await self.send_command_and_read_reply("{M140000")
-
- async def is_circulation_active(self) -> bool:
- """Returns whether temperature control is active or not."""
- reply = await self.send_command_and_read_reply("{M16****")
- return PBCommand(reply).parse_boolean()
-
- async def start_circulation(self):
- """Starts circulation pump."""
- await self.send_command_and_read_reply("{M160001")
-
- async def stop_circulation(self):
- """Stops circulation pump."""
- await self.send_command_and_read_reply("{M160000")
-
- async def pump_speed(self) -> str:
- """Returns current circulation pump speed (in rpm)."""
- reply = await self.send_command_and_read_reply("{M26****")
- return PBCommand(reply).parse_rpm()
-
- async def pump_speed_setpoint(self) -> str:
- """Returns the set point of the circulation pump speed (in rpm)."""
- reply = await self.send_command_and_read_reply("{M48****")
- return PBCommand(reply).parse_rpm()
-
- async def set_pump_speed(self, rpm: str):
- """Set the pump speed, in rpm. See device display for range."""
- parsed_rpm = flowchem_ureg(rpm)
- await self.send_command_and_read_reply(
- "{M48" + self._int_to_string(parsed_rpm.m_as("rpm"))
- )
-
- async def cooling_water_temp(self) -> str:
- """Returns the cooling water inlet temperature (in Celsius)."""
- reply = await self.send_command_and_read_reply("{M2C****")
- return PBCommand(reply).parse_temperature()
-
- async def cooling_water_pressure(self) -> Optional[float]:
- """Returns the cooling water inlet pressure (in mbar)."""
- reply = await self.send_command_and_read_reply("{M2D****")
- if pressure := PBCommand(reply).parse_integer() == 64536:
- return None
- return pressure
-
- async def cooling_water_temp_outflow(self) -> str:
- """Returns the cooling water outlet temperature (in Celsius)."""
- reply = await self.send_command_and_read_reply("{M4C****")
- return PBCommand(reply).parse_temperature()
-
- async def min_setpoint(self) -> str:
- """Returns the minimum accepted value for the temperature setpoint (in Celsius)."""
- reply = await self.send_command_and_read_reply("{M30****")
- return PBCommand(reply).parse_temperature()
-
- async def max_setpoint(self) -> str:
- """Returns the maximum accepted value for the temperature setpoint (in Celsius)."""
- reply = await self.send_command_and_read_reply("{M31****")
- return PBCommand(reply).parse_temperature()
-
- async def alarm_max_internal_temp(self) -> str:
- """Returns the max internal temp before the alarm is triggered and a fault generated."""
- reply = await self.send_command_and_read_reply("{M51****")
- return PBCommand(reply).parse_temperature()
-
- async def set_alarm_max_internal_temp(self, temp: str):
- """Sets the max internal temp before the alarm is triggered and a fault generated."""
- temp = flowchem_ureg(temp)
- await self.send_command_and_read_reply("{M51" + self._temp_to_string(temp))
-
- async def alarm_min_internal_temp(self) -> str:
- """Returns the min internal temp before the alarm is triggered and a fault generated."""
- reply = await self.send_command_and_read_reply("{M52****")
- return PBCommand(reply).parse_temperature()
-
- async def set_alarm_min_internal_temp(self, temp: str):
- """Sets the min internal temp before the alarm is triggered and a fault generated."""
- temp = flowchem_ureg(temp)
- await self.send_command_and_read_reply("{M52" + self._temp_to_string(temp))
-
- async def alarm_max_process_temp(self) -> str:
- """Returns the max process temp before the alarm is triggered and a fault generated."""
- reply = await self.send_command_and_read_reply("{M53****")
- return PBCommand(reply).parse_temperature()
-
- async def set_alarm_max_process_temp(self, temp: str):
- """Sets the max process temp before the alarm is triggered and a fault generated."""
- temp = flowchem_ureg(temp)
- await self.send_command_and_read_reply("{M53" + self._temp_to_string(temp))
-
- async def alarm_min_process_temp(self) -> str:
- """Returns the min process temp before the alarm is triggered and a fault generated."""
- reply = await self.send_command_and_read_reply("{M54****")
- return PBCommand(reply).parse_temperature()
-
- async def set_alarm_min_process_temp(self, temp: str):
- """Sets the min process temp before the alarm is triggered and a fault generated."""
- temp = flowchem_ureg(temp)
- await self.send_command_and_read_reply("{M54" + self._temp_to_string(temp))
-
- async def set_ramp_duration(self, ramp_time: str):
- """Sets the duration (in seconds) of a ramp to the temperature set by a later call to ramp_to_temperature."""
- parsed_time = flowchem_ureg(ramp_time)
- await self.send_command_and_read_reply(
- "{M59" + self._int_to_string(parsed_time.m_as("s"))
- )
-
- async def ramp_to_temperature(self, temperature: str):
- """Sets the duration (in seconds) of a ramp to the temperature set by a later call to start_ramp()."""
- temp = flowchem_ureg(temperature)
- await self.send_command_and_read_reply("{M5A" + self._temp_to_string(temp))
-
- async def is_venting(self) -> bool:
- """Whether the chiller is venting or not."""
- reply = await self.send_command_and_read_reply("{M6F****")
- return PBCommand(reply).parse_boolean()
-
- async def start_venting(self):
- """Starts venting. ONLY USE DURING SETUP! READ THE MANUAL!"""
- await self.send_command_and_read_reply("{M6F0001")
-
- async def stop_venting(self):
- """Stops venting."""
- await self.send_command_and_read_reply("{M6F0000")
-
- async def is_draining(self) -> bool:
- """Whether the chiller is venting or not."""
- reply = await self.send_command_and_read_reply("{M70****")
- return PBCommand(reply).parse_boolean()
-
- async def start_draining(self):
- """Starts venting. ONLY USE DURING SHUT DOWN! READ THE MANUAL!"""
- await self.send_command_and_read_reply("{M700001")
-
- async def stop_draining(self):
- """Stops venting."""
- await self.send_command_and_read_reply("{M700000")
-
- async def serial_number(self) -> int:
- """GGet serial number."""
- serial1 = await self.send_command_and_read_reply("{M1B****")
- serial2 = await self.send_command_and_read_reply("{M1C****")
- pb1, pb2 = PBCommand(serial1), PBCommand(serial2)
- return int(pb1.data + pb2.data, 16)
-
- async def wait_for_temperature_simple(self) -> None:
- """Returns as soon as the target temperature range has been reached, or timeout."""
- raise NotImplementedError
-
- async def wait_for_temperature_stable(self) -> None:
- """Returns when the target temperature range has been maintained for X seconds, or timeout."""
- raise NotImplementedError
-
- @staticmethod
- def _temp_to_string(temp: pint.Quantity) -> str:
- """From temperature to string for command. f^-1 of PCommand.parse_temperature."""
- min_temp = flowchem_ureg("-151 °C")
- max_temp = flowchem_ureg("327 °C")
- if not isinstance(temp, pint.Quantity):
- logger.warning(
- f"Implicit assumption that the temperature provided [{temp}] is in Celsius. Add units pls!"
- )
- temp = flowchem_ureg(f"{temp} °C")
- assert min_temp <= temp <= max_temp
- # Hexadecimal two's complement
- return f"{int(temp.m_as('°C') * 100) & 65535:04X}"
-
- @staticmethod
- def _int_to_string(number: int) -> str:
- """From temperature to string for command. f^-1 of PCommand.parse_integer."""
- return f"{number:04X}"
-
- async def __aenter__(self):
- await self.initialize()
- await self.set_temperature_setpoint(temp="20 °C")
- await self.set_temperature_setpoint(temp="20 °C")
- await self.start_temperature_control()
- await self.start_circulation()
- return self
-
- async def __aexit__(self, exc_type, exc_value, traceback):
- await self.set_temperature_setpoint("20 °C")
-
- # Wait until close to room temperature before turning off chiller
- while flowchem_ureg.parse_expression(
- await self.process_temperature()
- ) > flowchem_ureg.parse_expression("40 °C"):
- await asyncio.sleep(5)
-
- # Actually turn off chiller
- await self.stop_circulation()
- await self.stop_temperature_control()
-
- async def _update(self):
- await self.set_temperature_setpoint(self.temp)
-
- def get_router(self):
- """Creates an APIRouter for this HuberChiller instance."""
- # Local import to allow direct use of HuberChiller w/o fastapi installed
- from fastapi import APIRouter
-
- router = APIRouter()
- router.add_api_route(
- "/temperature/set-point", self.get_temperature_setpoint, methods=["GET"]
- )
- router.add_api_route(
- "/temperature/set-point", self.set_temperature_setpoint, methods=["PUT"]
- )
- router.add_api_route(
- "/temperature/set-point/min", self.min_setpoint, methods=["GET"]
- )
- router.add_api_route(
- "/temperature/set-point/max", self.max_setpoint, methods=["GET"]
- )
- router.add_api_route(
- "/temperature/process", self.process_temperature, methods=["GET"]
- )
- router.add_api_route(
- "/temperature/internal", self.internal_temperature, methods=["GET"]
- )
- router.add_api_route(
- "/temperature/return", self.return_temperature, methods=["GET"]
- )
- router.add_api_route("/power-exchanged", self.current_power, methods=["GET"])
- router.add_api_route("/status", self.status, methods=["GET"])
- router.add_api_route("/status2", self.status2, methods=["GET"])
- router.add_api_route("/pump/speed", self.pump_speed, methods=["GET"])
- router.add_api_route(
- "/temperature-control", self.is_temperature_control_active, methods=["GET"]
- )
- router.add_api_route(
- "/temperature-control/start",
- self.start_temperature_control,
- methods=["GET"],
- )
- router.add_api_route(
- "/temperature-control/stop", self.stop_temperature_control, methods=["GET"]
- )
- router.add_api_route(
- "/pump/circulation", self.is_circulation_active, methods=["GET"]
- )
- router.add_api_route(
- "/pump/circulation/start", self.start_circulation, methods=["GET"]
- )
- router.add_api_route(
- "/pump/circulation/stop", self.stop_circulation, methods=["GET"]
- )
- router.add_api_route("/pump/pressure", self.pump_pressure, methods=["GET"])
- router.add_api_route("/pump/speed", self.pump_speed, methods=["GET"])
- router.add_api_route(
- "/pump/speed/set-point", self.pump_speed_setpoint, methods=["GET"]
- )
- router.add_api_route(
- "/pump/speed/set-point", self.set_pump_speed, methods=["PUT"]
- )
- router.add_api_route(
- "/cooling-water/temperature-inlet", self.cooling_water_temp, methods=["GET"]
- )
- router.add_api_route(
- "/cooling-water/temperature-outlet",
- self.cooling_water_temp_outflow,
- methods=["GET"],
- )
- router.add_api_route(
- "/cooling-water/pressure", self.cooling_water_pressure, methods=["GET"]
- )
- router.add_api_route(
- "/alarm/process/min-temp", self.alarm_min_process_temp, methods=["GET"]
- )
- router.add_api_route(
- "/alarm/process/max-temp", self.alarm_max_process_temp, methods=["GET"]
- )
- router.add_api_route(
- "/alarm/process/min-temp", self.set_alarm_min_process_temp, methods=["PUT"]
- )
- router.add_api_route(
- "/alarm/process/max-temp", self.set_alarm_min_process_temp, methods=["PUT"]
- )
- router.add_api_route(
- "/alarm/internal/min-temp", self.alarm_min_internal_temp, methods=["GET"]
- )
- router.add_api_route(
- "/alarm/internal/max-temp", self.alarm_max_internal_temp, methods=["GET"]
- )
- router.add_api_route(
- "/alarm/internal/min-temp",
- self.set_alarm_min_internal_temp,
- methods=["PUT"],
- )
- router.add_api_route(
- "/alarm/internal/max-temp",
- self.set_alarm_min_internal_temp,
- methods=["PUT"],
- )
- router.add_api_route("/venting/is_venting", self.is_venting, methods=["GET"])
- router.add_api_route("/venting/start", self.start_venting, methods=["GET"])
- router.add_api_route("/venting/stop", self.stop_venting, methods=["GET"])
- router.add_api_route("/draining/is_venting", self.is_draining, methods=["GET"])
- router.add_api_route("/draining/start", self.start_draining, methods=["GET"])
- router.add_api_route("/draining/stop", self.stop_draining, methods=["GET"])
- router.add_api_route("/serial_number", self.serial_number, methods=["GET"])
-
- return router
-
-
-if __name__ == "__main__":
- chiller = HuberChiller(aioserial.AioSerial(port="COM8"))
- status = asyncio.run(chiller.status())
- print(status)
diff --git a/flowchem/components/devices/Knauer/HPLC_control.py b/flowchem/components/devices/Knauer/HPLC_control.py
deleted file mode 100644
index 9573ebfe..00000000
--- a/flowchem/components/devices/Knauer/HPLC_control.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# This could become a mess...
-# what needs to be done is switch the lamps on, which works over serial.
-# the rest is just sending commands to the console, possibly also to another machine
-
-# https://www.dataapex.com/documentation/Content/Help/110-technical-specifications/110.020-command-line-parameters/110.020-command-line-parameters.htm?Highlight=command%20line
-
-import socket
-import subprocess
-from pathlib import Path
-from threading import Thread
-from time import sleep
-from typing import Union
-
-import tenacity
-
-from flowchem.exceptions import InvalidConfiguration
-
-try:
- # noinspection PyUnresolvedReferences
- from flowchem.components.devices.Knauer.Knauer_HPLC_NDA import Lamp_Command
-
- HAS_KNAUER_COMMANDS = True
-except ModuleNotFoundError:
- HAS_KNAUER_COMMANDS = False
- raise ModuleNotFoundError("You need to get the NDA communication from Knauer.")
-
-# Todo should have a command constructor dataclass, would be more neat. For now, will do without to get it running asap
-
-# TODO Very weird, when starting from synthesis, fractioning valve is blocked. no idea why, it's ip is not used.
-
-
-class ClarityInterface:
- def __init__(
- self,
- remote: bool = False,
- host: str = None,
- port: int = None,
- path_to_executable: str = None,
- instrument_number: int = 1,
- ):
- if not HAS_KNAUER_COMMANDS:
- raise InvalidConfiguration(
- "Knauer Lamps unusable: no Knauer Commands available.\n"
- "Contact your distributor to get the serial API documentation."
- )
- # just determine path to executable, and open socket if for remote usage
- self.remote = remote
- self.instrument = instrument_number
- self.path_to_executable = path_to_executable
- if self.remote:
- self.interface = MessageSender(host, port)
- self.command_executor = self.interface.open_socket_and_send
- else:
- self.command_executor = ClarityExecutioner.execute_command # type:ignore
-
- # TODO would have to have some way to fail
- @classmethod
- def from_config(cls, config_dict: dict):
- try:
- pass
- except:
- pass
-
- # if remote execute everything on other PC, else on this
- # Todo doesn't make sense here, done other way
- def execute_command(self, command_string):
- if self.remote:
- self.command_executor(command_string)
- else:
- self.command_executor(command_string, self.path_to_executable)
-
- # bit displaced convenience function to switch on the lamps of hplc detector.
- # TODO remove if published
- def switch_lamp_on(self, address="192.168.10.111", port=10001):
- """
- Has to be performed BEFORE starting clarity, otherwise sockets get blocked
- Args:
- address:
- port:
-
- Returns:
-
- """
-
- # send the respective two commands and check return. Send to socket
- message_sender = MessageSender(address, port)
- message_sender.open_socket_and_send(Lamp_Command.deut_lamp_on)
- sleep(1)
- message_sender.open_socket_and_send(Lamp_Command.hal_lamp_on)
- sleep(15)
-
- # define relevant strings
- def open_clarity_chrom(
- self, user: str, config_file: str, password: str = None, start_method: str = ""
- ):
- """
- start_method: supply the path to the method to start with, this is important for a soft column start
- config file: if you want to start with specific instrumment configuration, specify location of config file here
- """
- if not password:
- self.execute_command(
- f"i={self.instrument} cfg={config_file} u={user} {start_method}"
- )
- else:
- self.execute_command(
- f"i={self.instrument} cfg={config_file} u={user} p={password} {start_method}"
- )
- sleep(20)
-
- # TODO should be OS agnostic
- def slow_flowrate_ramp(self, path: str, method_list: tuple = ()):
- """
- path: path where the methods are located
- method list
- """
- for current_method in method_list:
- self.execute_command(f"i={self.instrument} {path}\\{current_method}")
- # not very elegant, but sending and setting method takes at least 10 seconds, only has to run during platform startup and can't see more elegant way how to do that
- sleep(20)
-
- def load_file(self, path_to_file: str):
- """has to be done to open project, then method. Take care to select 'Send Method to Instrument' option in Method
- Sending Options dialog in System Configuration."""
- self.execute_command(f"i={self.instrument} {path_to_file}")
- sleep(10)
-
- def set_sample_name(self, sample_name):
- """Sets the sample name for the next single run"""
- self.execute_command(f"i={self.instrument} set_sample_name={sample_name}")
- sleep(1)
-
- def run(self):
- """Runs the instrument. Care should be taken to activate automatic data export on HPLC. (can be done via command,
- but that only makes it more complicated). Takes at least 2 sec until run starts"""
- self.execute_command(f"run={self.instrument}")
-
- def exit(self):
- """Exit Clarity Chrom"""
- self.execute_command("exit")
- sleep(10)
-
-
-class MessageSender:
- def __init__(self, host, port):
- self.host = host
- self.port = port
-
- # encode('utf-8')
-
- @tenacity.retry(
- stop=tenacity.stop_after_attempt(5), wait=tenacity.wait_fixed(2), reraise=True
- )
- def open_socket_and_send(self, message: str):
- s = socket.socket()
- s.connect((self.host, self.port))
- s.sendall(message.encode("utf-8"))
- s.close()
-
-
-class ClarityExecutioner:
- """This needs to run on the computer having claritychrom installed, except for one uses the same PC. However,
- going via socket and localhost would also work, but seems a bit cumbersome.
- open up server socket. Everything coming in will be prepended with claritychrom.exe (if it is not already)"""
-
- command_prepend = "claritychrom.exe"
-
- def __init__(self, port, allowed_client="192.168.10.20", host_ip="192.168.10.11"):
- self.port = port
- self.allowed_client = allowed_client
- self.host_ip = host_ip
- # think that should also go in thread, otherwise blocks
- self.server_socket = self.open_server()
- self.executioner = Thread(target=self.get_commands_and_execute, daemon=False)
- print("a")
- self.executioner.start()
- print("b")
-
- def open_server(self):
- s = socket.socket()
- s.bind((self.host_ip, self.port))
- s.listen(5)
- return s
-
- def accept_new_connection(self):
- client_socket, address = self.server_socket.accept()
- if not address[0] == self.allowed_client:
- client_socket.close()
- print(f"nice try {client_socket, address}")
- else:
- # if below code is executed, that means the sender is connected
- print(f"[+] {address} is connected.")
- # in unicode
- request = client_socket.recv(1024).decode("utf-8")
- client_socket.close()
- print(request)
- return request
-
- # TODO: instrument number has to go into command execution
- def execute_command(
- self,
- command: str,
- folder_of_executable: Union[Path, str] = r"C:\claritychrom\bin\\",
- ):
- prefix = "claritychrom.exe"
- # sanitize input a bit
- if command.split(" ")[0] != prefix:
- command = folder_of_executable + prefix + " " + command # type:ignore
- print(command)
- try:
- x = subprocess
- x.run(command, shell=True, capture_output=False, timeout=3)
- except subprocess.TimeoutExpired:
- print("Damn, Subprocess")
-
- def get_commands_and_execute(self):
- while True:
- request = self.accept_new_connection()
- self.execute_command(request)
- sleep(1)
- print("listening")
-
-
-###TODO: also dsk or k for opening with specific desktop could be helpful-.
-# TODO Export results can be specified -> exports result, rewrite to a nicer interface
-
-if __name__ == "__main__":
- computer_w_Clarity = False
- if computer_w_Clarity:
- analyser = ClarityExecutioner(10014)
- else:
- commander = ClarityInterface(
- remote=True, host="192.168.10.11", port=10014, instrument_number=2
- )
- commander.exit()
- commander.switch_lamp_on() # address and port hardcoded
- commander.open_clarity_chrom(
- "admin",
- config_file=r"C:\ClarityChrom\Cfg\automated_exp.cfg ",
- start_method=r"D:\Data2q\sugar-optimizer\autostartup_analysis\autostartup_005_Sugar-c18_shortened.MET",
- )
- commander.slow_flowrate_ramp(
- r"D:\Data2q\sugar-optimizer\autostartup_analysis",
- method_list=(
- "autostartup_005_Sugar-c18_shortened.MET",
- "autostartup_01_Sugar-c18_shortened.MET",
- "autostartup_015_Sugar-c18_shortened.MET",
- "autostartup_02_Sugar-c18_shortened.MET",
- "autostartup_025_Sugar-c18_shortened.MET",
- "autostartup_03_Sugar-c18_shortened.MET",
- "autostartup_035_Sugar-c18_shortened.MET",
- "autostartup_04_Sugar-c18_shortened.MET",
- "autostartup_045_Sugar-c18_shortened.MET",
- "autostartup_05_Sugar-c18_shortened.MET",
- ),
- )
- commander.load_file(
- r"D:\Data2q\sugar-optimizer\autostartup_analysis\auto_Sugar-c18_shortened.MET"
- )
- # commander.load_file("opendedicatedproject") # open a project for measurements
- commander.set_sample_name("test123")
- commander.run()
diff --git a/flowchem/components/devices/Knauer/Knauer_autodiscover.py b/flowchem/components/devices/Knauer/Knauer_autodiscover.py
deleted file mode 100644
index e533aa82..00000000
--- a/flowchem/components/devices/Knauer/Knauer_autodiscover.py
+++ /dev/null
@@ -1,122 +0,0 @@
-""" Autodiscover Knauer devices on network """
-import asyncio
-import queue
-import socket
-import sys
-import time
-from threading import Thread
-from typing import Dict, Text, Tuple, Union
-
-from getmac import getmac
-from loguru import logger
-
-Address = Tuple[str, int]
-
-
-class BroadcastProtocol(asyncio.DatagramProtocol):
- """From https://gist.github.com/yluthu/4f785d4546057b49b56c"""
-
- def __init__(self, target: Address, response_queue: queue.Queue):
- self.target = target
- self.loop = asyncio.get_event_loop()
- self._queue = response_queue
-
- def connection_made(self, transport: asyncio.transports.DatagramTransport): # type: ignore
- """Called upon connection."""
- sock = transport.get_extra_info("socket") # type: socket.socket
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # sets to broadcast
- transport.sendto(b"\x00\x01\x00\xf6", self.target)
-
- def datagram_received(self, data: Union[bytes, Text], addr: Address):
- """Called on data received"""
- logger.trace(f"Received data from {addr}")
- self._queue.put(addr[0])
-
-
-async def get_device_type(ip_address: str) -> str:
- """Returns either 'Pump', 'Valve' or 'Unknown'"""
- fut = asyncio.open_connection(host=ip_address, port=10001)
- try:
- reader, writer = await asyncio.wait_for(fut, timeout=3)
- except ConnectionError:
- return "ConnectionError"
- except asyncio.TimeoutError:
- if ip_address == "192.168.1.2":
- return "TimeoutError - Nice FlowIR that you have :D"
- return "TimeoutError"
-
- # Test Pump
- writer.write("HEADTYPE:?\n\r".encode())
- reply = await reader.readuntil(separator=b"\r")
- if reply.startswith(b"HEADTYPE"):
- logger.debug(f"Device {ip_address} is a Pump")
- return "Pump"
-
- # Test Valve
- writer.write("T:?\n\r".encode())
- reply = await reader.readuntil(separator=b"\r")
- if reply.startswith(b"VALVE"):
- logger.debug(f"Device {ip_address} is a Valve")
- return "Valve"
-
- return "Unknown"
-
-
-def autodiscover_knauer(source_ip: str = "") -> Dict[str, str]:
- """
- Automatically find Knauer ethernet device on the network and returns the IP associated to each MAC address.
- Note that the MAC is the key here as it is the parameter used in configuration files.
- Knauer devices only support DHCP so static IPs are not an option.
-
-
- Args:
- source_ip: source IP for autodiscover (only relevant if multiple network interfaces are available!)
- Returns:
- List of tuples (IP, MAC, device_type), one per device replying to autodiscover
- """
-
- # Define source IP resolving local hostname.
- if not source_ip:
- hostname = socket.gethostname()
- source_ip = socket.gethostbyname(hostname)
-
- loop = asyncio.get_event_loop()
- device_q: queue.Queue = queue.Queue()
- coro = loop.create_datagram_endpoint(
- lambda: BroadcastProtocol(("255.255.255.255", 30718), response_queue=device_q),
- local_addr=(source_ip, 28688),
- )
- loop.run_until_complete(coro)
- thread = Thread(target=loop.run_forever)
- thread.start()
- time.sleep(2)
- loop.call_soon_threadsafe(loop.stop) # here
- thread.join()
-
- device_list = []
- for _ in range(40):
- try:
- device_list.append(device_q.get_nowait())
- except queue.Empty:
- break
-
- device_info = dict()
- for device_ip in device_list:
- # MAC address
- mac = getmac.get_mac_address(ip=device_ip)
- device_info[mac] = device_ip
- return device_info
-
-
-if __name__ == "__main__":
- # This is a bug of asyncio on Windows :|
- if sys.platform == "win32":
- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-
- # Autodiscover devices (dict mac as index, IP as value)
- devices = autodiscover_knauer()
-
- for mac_address, ip in devices.items():
- # Device Type
- device_type = asyncio.run(get_device_type(ip))
- print(f"MAC: {mac_address} IP: {ip} DEVICE_TYPE: {device_type}")
diff --git a/flowchem/components/devices/Knauer/__init__.py b/flowchem/components/devices/Knauer/__init__.py
deleted file mode 100644
index 60640814..00000000
--- a/flowchem/components/devices/Knauer/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-""" Knauer devices """
-from .AzuraCompactPump import AzuraCompactPump
-from .Knauer_autodiscover import autodiscover_knauer
-from .KnauerValve import (
- Knauer6Port2PositionValve,
- Knauer6Port6PositionValve,
- Knauer12PortValve,
- Knauer16PortValve,
-)
-
-__all__ = [
- "AzuraCompactPump",
- "Knauer6Port2PositionValve",
- "Knauer6Port6PositionValve",
- "Knauer12PortValve",
- "Knauer16PortValve",
-]
diff --git a/flowchem/components/devices/Magritek/__init__.py b/flowchem/components/devices/Magritek/__init__.py
deleted file mode 100644
index 5da083a4..00000000
--- a/flowchem/components/devices/Magritek/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-""" Magritek devices """
-from .nmrspectrum import NMRSpectrum
-from .spinsolve import Spinsolve
-
-__all__ = ["NMRSpectrum", "Spinsolve"]
diff --git a/flowchem/components/devices/Magritek/nmrspectrum.py b/flowchem/components/devices/Magritek/nmrspectrum.py
deleted file mode 100644
index 879716d1..00000000
--- a/flowchem/components/devices/Magritek/nmrspectrum.py
+++ /dev/null
@@ -1,67 +0,0 @@
-""" NMR-spectrum object represents an NMR spectrum. """
-import time
-from pathlib import Path
-
-import matplotlib.pyplot as plt
-import nmrglue as ng
-
-
-class NMRSpectrum:
- """General spectrum object, instantiated from Spinsolve folder w/ experimental results."""
-
- def __init__(self, location: Path):
- jcamp_file = location / "nmr_fid.dx"
- if not jcamp_file.exists():
- print("File nmr_fid.dx not existing, waiting 2 sec just in case...")
- time.sleep(2)
- self.dic, self.raw_data = ng.spinsolve.read(dir=location.as_posix())
- self.processed_data = None
-
- @property
- def uc(self):
- """
-
- Returns:
-
- """
- data = self.processed_data if self.processed_data is not None else self.raw_data
- return ng.spinsolve.make_uc(self.dic, data)
-
- def process(self):
- """Basic spectrum processing. Application-specific processing suggested."""
- # Zerofill
- self.processed_data = ng.proc_base.zf_auto(
- ng.proc_base.zf_double(self.raw_data, 1)
- )
-
- # FT
- self.processed_data = ng.proc_base.fft(self.processed_data)
-
- # Phasing
- try:
- # Try to extract phase info from JCAMP-DX file...
- ph0 = float(self.dic["dx"]["$PHC0"].pop())
- ph1 = float(self.dic["dx"]["$PHC1"].pop())
- self.processed_data = ng.proc_base.ps(self.processed_data, ph0, ph1, True)
- except KeyError:
- # Authophase needed - no info on phase from nmrglue
- self.processed_data = ng.proc_autophase.autops(
- self.processed_data,
- "acme",
- disp=False,
- )
-
- # Delete imaginary
- self.processed_data = ng.proc_base.di(self.processed_data)
-
- def plot(self, ppm_range=(8, 0)):
- """Returns spectrum as matplotlib figure"""
- if self.processed_data is None:
- self.process()
-
- fig = plt.figure()
- axes = fig.add_subplot(111)
- axes.plot(self.uc.ppm_scale(), self.processed_data)
-
- plt.xlim(ppm_range) # plot as we are used to, from positive to negative
- return fig
diff --git a/flowchem/components/devices/Magritek/parser.py b/flowchem/components/devices/Magritek/parser.py
deleted file mode 100644
index bc68573c..00000000
--- a/flowchem/components/devices/Magritek/parser.py
+++ /dev/null
@@ -1,86 +0,0 @@
-""" Functions related to instrument reply parsing """
-
-import warnings
-from enum import Enum
-
-from lxml import etree
-
-
-class StatusNotification(Enum):
- """
- Represent the type of the status notification
- """
-
- STARTED = 1 # received, starting protocol
- RUNNING = 2 # All good,