diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..35c7423a --- /dev/null +++ b/Makefile @@ -0,0 +1,92 @@ +TOPDIR := $(shell git rev-parse --show-toplevel) +BASE_VER ?= v1.2.0 +BUILD_ID ?= $(shell git describe --always --dirty) +VERSION := $(BASE_VER)-$(BUILD_ID) + + +export REPO := ezkf +REGISTRY ?= lr1-bd-harbor-registry.mip.storage.hpecorp.net/develop +EZKF_REGISTRY ?= $(REGISTRY)/$(REPO) + +docker-build: + @echo "Building JupyterLab images..." + $(foreach target, base jupyter, \ + docker build \ + --build-arg="BASE_IMG=$(EZKF_REGISTRY)/base:$(VERSION)" \ + -t $(EZKF_REGISTRY)/$(target):$(VERSION) \ + -f $(TOPDIR)/dockerfiles/notebooks/$(target)/Dockerfile \ + $(TOPDIR)/dockerfiles/notebooks/$(target); \ + ) + + @echo "Building the images for the Question-Answering demo..." + $(foreach target, app llm transformer vectorstore, \ + docker build \ + -t $(EZKF_REGISTRY)/qna-$(target):$(VERSION) \ + -f $(TOPDIR)/demos/rag-demos/question-answering/dockerfiles/$(target)/Dockerfile \ + $(TOPDIR)/demos/rag-demos/question-answering/dockerfiles/$(target); \ + ) + + @echo "Building the images for the Question-Answering GPU demo..." + $(foreach target, app transformer vectorstore triton-inference-server, \ + docker build \ + -t $(EZKF_REGISTRY)/qna-$(target)-gpu:$(VERSION) \ + -f $(TOPDIR)/demos/rag-demos/question-answering-gpu/dockerfiles/$(target)/Dockerfile \ + $(TOPDIR)/demos/rag-demos/question-answering-gpu/dockerfiles/$(target); \ + ) + + @echo "Building the images for the Fraud Detection demo..." + docker build \ + -t $(EZKF_REGISTRY)/fraud-detection-app:$(VERSION) \ + -f $(TOPDIR)/demos/fraud-detection/dockerfiles/app/Dockerfile \ + $(TOPDIR)/demos/fraud-detection/dockerfiles/app + +docker-push: + @echo "Pushing JupyterLab images..." + $(foreach target, base jupyter, \ + docker push $(EZKF_REGISTRY)/$(target):$(VERSION); \ + ) + + @echo "Pushing the images for the Question-Answering demo..." + $(foreach target, app llm transformer vectorstore, \ + docker push $(EZKF_REGISTRY)/qna-$(target):$(VERSION); \ + ) + + @echo "Pushing the images for the Question-Answering GPU demo..." + $(foreach target, app transformer vectorstore triton-inference-server, \ + docker push $(EZKF_REGISTRY)/qna-$(target)-gpu:$(VERSION); \ + ) + + @echo "Pushing the images for the Fraud Detection demo..." + docker push $(EZKF_REGISTRY)/fraud-detection-app:$(VERSION) + +################################################################################ +# Pipeline API # +################################################################################ + +version: + @echo $(VERSION) + +deliverables: + @echo + +dependencies: + @echo + +test: + @echo + +.PHONY: images +images: + @echo $(EZKF_REGISTRY)/base:$(VERSION) + @echo $(EZKF_REGISTRY)/jupyter:$(VERSION) + @echo $(EZKF_REGISTRY)/qna-app:$(VERSION) + @echo $(EZKF_REGISTRY)/qna-llm:$(VERSION) + @echo $(EZKF_REGISTRY)/qna-transformer:$(VERSION) + @echo $(EZKF_REGISTRY)/qna-vectorstore:$(VERSION) + @echo $(EZKF_REGISTRY)/qna-app-gpu:$(VERSION) + @echo $(EZKF_REGISTRY)/qna-transformer-gpu:$(VERSION) + @echo $(EZKF_REGISTRY)/qna-vectorstore-gpu:$(VERSION) + @echo $(EZKF_REGISTRY)/qna-triton-inference-server-gpu:$(VERSION) + @echo $(EZKF_REGISTRY)/fraud-detection-app:$(VERSION) + +release: docker-build docker-push diff --git a/README.md b/README.md index e4587b9b..94a194dc 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,15 @@ capabilities. 
![ezua-tutorials](images/ezua-tutorials.jpg) +## Branching Model + +This repository adheres to the versioning scheme of the EzUA platform. The `develop` branch serves +as the hub for active development. Release branches are derived from the develop branch, aligning +closely with EzUA versions. For instance, `release-x.y.z` is designed to seamlessly work with EzUA +version `x.y.z`. It's worth noting that when you clone this repository, the default branch aligns +with the most recent EzUA version. Therefore, if you're working with the latest EzUA iteration, you +can clone it and proceed without the need to switch branches. + ## Repository Structure This repository is organized into two main directories: @@ -34,8 +43,7 @@ find specialized guides that show you how to leverage EzUA's frameworks and tool The current list of framework-specific tutorials include: - [FEAST feature store](tutorials/feast/): Ride sharing tutorial -- [Kubeflow Pipelines](tutorials/kubeflow-pipelines/): Financial time series tutorial -- [RAY](tutorials/ray): News recommendation tutorial +- [Superset](tutorials/superset/): Data connection and visualization ## Getting Started diff --git a/demos/bike-sharing/README.md b/demos/bike-sharing/README.md index 5902f9a9..5097dc7d 100644 --- a/demos/bike-sharing/README.md +++ b/demos/bike-sharing/README.md @@ -32,10 +32,11 @@ For this tutorial, ensure you have: To complete the tutorial follow the steps below: 1. Login to your EzUA cluster, using your credentials. -1. Create a new Notebook server using the `jupyter-data-science` image. Request at least 4Gi of memory for the Notebook - server. +1. Create a new Notebook server using the `jupyter-data-science` image. Request at least 4Gi of + memory for the Notebook server. 1. Connect to the Notebook server, launch a new terminal window, and clone the repository locally. -1. Navigate to the tutorial's directory (`ezua-tutorials/tutorials/mlflow`) + See the troubleshooting section if this step fails. +1. Navigate to the tutorial's directory (`ezua-tutorials/demos/bike-sharing`) 1. Create your virtual environment: - Deactivate the base conda environment: ``` 1. Launch the two Notebooks in order and execute the code cells. Make sure to select the `bike-sharing` environment kernel for each Notebook. +## Troubleshooting + +If you are behind a proxy, you will have to set a few environment variables to be able to clone the +`ezua-tutorials` repository locally and install the dependencies via `pip`. To this end, launch a +terminal window and, before cloning the repository, run the following commands: + +- `export http_proxy=` +- `export https_proxy=` + ## How it Works MLflow is an open-source platform designed to manage the end-to-end machine learning lifecycle. 
It encompasses tools for diff --git a/demos/bike-sharing/environment.yaml b/demos/bike-sharing/environment.yaml index 421103fa..e7a16ff9 100644 --- a/demos/bike-sharing/environment.yaml +++ b/demos/bike-sharing/environment.yaml @@ -1,7 +1,6 @@ name: bike-sharing channels: - conda-forge - - defaults dependencies: - python=3.8 - pip diff --git a/demos/fraud-detection/application/fraud-detection-app.tgz b/demos/fraud-detection/application/fraud-detection-app.tgz new file mode 100644 index 00000000..41f40943 Binary files /dev/null and b/demos/fraud-detection/application/fraud-detection-app.tgz differ diff --git a/demos/fraud-detection/application/fraud-detection.tgz b/demos/fraud-detection/application/fraud-detection.tgz deleted file mode 100644 index 3e220167..00000000 Binary files a/demos/fraud-detection/application/fraud-detection.tgz and /dev/null differ diff --git a/demos/fraud-detection/application/helm/Chart.yaml b/demos/fraud-detection/application/helm/Chart.yaml index 6d3167cf..7bd6f343 100644 --- a/demos/fraud-detection/application/helm/Chart.yaml +++ b/demos/fraud-detection/application/helm/Chart.yaml @@ -1,6 +1,6 @@ name: fraud-detection-app -version: 0.1.0 +version: 0.2.1 apiVersion: v2 -appVersion: v0.1.0 +appVersion: v0.2.1 description: Install the Fraud Detection application on EzAF type: application \ No newline at end of file diff --git a/demos/fraud-detection/application/helm/values.yaml b/demos/fraud-detection/application/helm/values.yaml index d48d2aee..fef34e11 100644 --- a/demos/fraud-detection/application/helm/values.yaml +++ b/demos/fraud-detection/application/helm/values.yaml @@ -8,7 +8,7 @@ image: repository: dpoulopoulos/fraud-detection-app pullPolicy: Always # Overrides the image tag whose default is the chart appVersion. - tag: "v0.1.0" + tag: "v0.2.1" imagePullSecrets: [] nameOverride: "" @@ -75,10 +75,10 @@ ingress: resources: limits: cpu: 100m - memory: 128Mi + memory: 1Gi requests: cpu: 100m - memory: 128Mi + memory: 1Gi autoscaling: enabled: false diff --git a/demos/rag-demos/question-answering-gpu/01.create-vectorstore.ipynb b/demos/rag-demos/question-answering-gpu/01.create-vectorstore.ipynb new file mode 100644 index 00000000..f4525067 --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/01.create-vectorstore.ipynb @@ -0,0 +1,406 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "3324b7d0-515d-46a7-ac00-03e788d5e96a", + "metadata": { + "tags": [] + }, + "source": [ + "# Vector Stores: Embedding and Storing Documents in a Latent Space\n", + "\n", + "In this Jupyter Notebook, you explore a foundational element of a question-answering system: the Vector Store. The\n", + "Vector Store serves as the key component that allows you to efficiently retrieve relevant context from a corpus of\n", + "documents based on a user's query, providing the backbone of the information retrieval system.\n", + "\n", + "
\n", + " \"documents\"\n", + "
\n", + " Photo by Annie Spratt on Unsplash\n", + "
\n", + "
\n", + "\n", + "The approach you will use involves transforming each document into a high-dimensional numerical\n", + "representation known as an \"embedding\", using a fine-tuned [BGE-M3](https://arxiv.org/abs/2402.03216) embeddings model.\n", + "This process is sometimes referred to as \"embedding\" the document in a latent space. The latent space here is a\n", + "high-dimensional space where similar documents are close to each other. The position of a document in this space is\n", + "determined by the content and the semantic meaning it carries.\n", + "\n", + "Once you have these embeddings, you store them in a Vector Store. A Vector Store is an advanced AI-native database\n", + "designed to hold these high-dimensional vectors, index them, and provide efficient search capabilities. This enables you\n", + "to quickly identify documents in your corpus that are semantically similar to a given query, which will also be\n", + "represented as a vector in the same latent space. For this example, you will use [Chroma](https://www.trychroma.com/),\n", + "a popular open source vector database.\n", + "\n", + "The following cells in this Notebook guides you through the process of creating such a Vector Store. You start by\n", + "generating embeddings for each document, then you move on to storing these embeddings in a Vector Store, and finally,\n", + "you see how easy it is to to retrieve documents from the Vector Store based on a query.\n", + "\n", + "## Table of Contents\n", + "\n", + "1. [Download the Embeddings Model](#download-the-embeddings-model)\n", + "1. [Load the Documents](#load-the-documents)\n", + "1. [Document Processing](#document-processing-chunking-text-for-the-language-model)\n", + "1. [Generate and Store Embeddings](#generating-embeddings--storing-them-in-chroma)\n", + "1. [Conclusion and Next Steps](#conclusion-and-next-steps)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "85df209f-e471-45d8-ad58-ad5c34819ebe", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import os\n", + "import glob\n", + "\n", + "from tqdm import tqdm\n", + "from chromadb.config import Settings\n", + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "from langchain_community.vectorstores import Chroma\n", + "from langchain_community.document_loaders import JSONLoader\n", + "\n", + "from embeddings import EmbeddingsModel" + ] + }, + { + "cell_type": "markdown", + "id": "4787d603-eea5-446b-bd7e-2733b0137618", + "metadata": {}, + "source": [ + "# Download the Embeddings Model\n", + "\n", + "The initial step in this process involves downloading the fine-tuned embeddings model. This is a vital step, as you will require the model to both create the Vector Store and deploy it for inference with KServe." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5b348281-6967-4fb2-aa27-5d779962f354", + "metadata": {}, + "outputs": [], + "source": [ + "# If you are behind a proxy, do not forget to set your `https_proxy` and `HTTPS_PROXY` environment variables.\n", + "# os.environ[\"https_proxy\"] = \"\"\n", + "# os.environ[\"HTTPS_PROXY\"] = \"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8097c431-da91-4811-8a48-200131032ccf", + "metadata": {}, + "outputs": [], + "source": [ + "!mkdir bge-m3 # create a directory to download the embeddings model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2a94c8d0-5ae8-45a7-b1c6-5c6108c2b5a9", + "metadata": {}, + "outputs": [], + "source": [ + "!wget https://ezmeral-artifacts.s3.us-east-2.amazonaws.com/bge-m3.tar.gz # download the embeddings model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80041c5c-5e4f-474a-a1a8-798b057d5975", + "metadata": {}, + "outputs": [], + "source": [ + "!mv bge-m3.tar.gz bge-m3 # move the embeddings model tarball into the right directory" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42118141-8339-468b-864f-c2e1b9e5583f", + "metadata": {}, + "outputs": [], + "source": [ + "!tar xzf bge-m3/bge-m3.tar.gz -C bge-m3 # extract the embeddings model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e65d7d0-b5bc-4b02-b5b7-4e8575b77940", + "metadata": {}, + "outputs": [], + "source": [ + "!rm bge-m3/bge-m3.tar.gz # remove the tarball you downloaded" + ] + }, + { + "cell_type": "markdown", + "id": "ea076619-2576-4154-ad50-0775b84a4359", + "metadata": { + "tags": [] + }, + "source": [ + "# Load the Documents\n", + "\n", + "The next cells use LangChain's `JSONLoader` to load JSON documents from a specified directory. This step is\n", + "essential for preparing your data before embedding it into the high-dimensional latent space. By\n", + "running the following cells, you have a list of documents ready to be processed and embedded in the latent space.\n", + "This forms your corpus."
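, + "\n", + "As a side note, the `jq_schema` argument you see below uses jq syntax to select which field of each JSON record becomes a document. Here is a small, self-contained illustration with a hypothetical toy file (it assumes the `jq` Python package that `JSONLoader` depends on is installed in the environment):\n", + "\n", + "```python\n", + "# A hedged sketch with a made-up toy file; the real corpus is loaded below.\n", + "import json\n", + "from langchain_community.document_loaders import JSONLoader\n", + "\n", + "with open(\"/tmp/toy.json\", \"w\") as f:\n", + "    json.dump([{\"content\": \"first doc\"}, {\"content\": \"second doc\"}], f)\n", + "\n", + "loader = JSONLoader(file_path=\"/tmp/toy.json\", jq_schema=\".[].content\", text_content=False)\n", + "for doc in loader.load():\n", + "    print(doc.page_content)  # -> first doc, second doc\n", + "```"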
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ab41fcf7-9e8c-4198-9552-da1d23dd3013", + "metadata": {}, + "outputs": [], + "source": [ + "docs = []" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4a1a5db7-b689-437e-b756-60ed7afc2f71", + "metadata": {}, + "outputs": [], + "source": [ + "ezua_loader = JSONLoader(\n", + "    file_path='./documents/EzUA.json',\n", + "    jq_schema='.[].content',\n", + "    text_content=False)\n", + "\n", + "ezua_data = ezua_loader.load()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2f951948-8459-48e1-8187-1ba5b8a4141d", + "metadata": {}, + "outputs": [], + "source": [ + "ezdf_loader = JSONLoader(\n", + "    file_path='./documents/EzDF.json',\n", + "    jq_schema='.[].content',\n", + "    text_content=False)\n", + "\n", + "ezdf_data = ezdf_loader.load()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "97972d83-3d4c-40c4-857b-a2bd8eb4d597", + "metadata": {}, + "outputs": [], + "source": [ + "mlde_loader = JSONLoader(\n", + "    file_path='./documents/MLDE.json',\n", + "    jq_schema='.[].content',\n", + "    text_content=False)\n", + "\n", + "mlde_data = mlde_loader.load()\n", + "\n", + "mlde_data_filtered = list(filter(lambda doc: doc.page_content != \"\", mlde_data))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da034b04-2882-43cd-8fe9-00279747a0b8", + "metadata": {}, + "outputs": [], + "source": [ + "mldm_loader = JSONLoader(\n", + "    file_path='./documents/MLDM.json',\n", + "    jq_schema='.[].body',\n", + "    text_content=False)\n", + "\n", + "mldm_data = mldm_loader.load()\n", + "\n", + "mldm_data_filtered = list(filter(lambda doc: doc.page_content != \"\", mldm_data))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d8683547-5a20-4fda-8e54-47ce46a776fc", + "metadata": {}, + "outputs": [], + "source": [ + "docs = ezua_data + ezdf_data + mlde_data_filtered + mldm_data_filtered" + ] + }, + { + "cell_type": "markdown", + "id": "172ce8b8-52d1-468d-a28c-7384b90f553f", + "metadata": { + "tags": [] + }, + "source": [ + "# Document Processing: Chunking Text for the Language Model\n", + "\n", + "In this section of the Notebook, you process the documents by splitting them into chunks. This operation is crucial when\n", + "working with Large Language Models (LLMs), as these models have a maximum limit on the number of tokens (words or pieces\n", + "of words) they can process at once. This limit is often referred to as the model's \"context window\".\n", + "\n", + "In this example, you split each document into segments that are at most `500` characters long. You use LangChain's\n", + "`RecursiveCharacterTextSplitter`, which, by default, splits each document when it encounters two consecutive newline\n", + "characters, represented as `\\\\n\\\\n`. Furthermore, consecutive segments share an overlap of `100` characters, which helps\n", + "preserve context across chunk boundaries."
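, + "\n", + "To build intuition for how `chunk_size` and `chunk_overlap` interact, here is a small, self-contained illustration; the numbers are deliberately tiny, and the actual corpus is split in the next cell:\n", + "\n", + "```python\n", + "# A hedged sketch: split a toy text into <=100-character chunks with a\n", + "# 20-character overlap, so consecutive chunks share some trailing context.\n", + "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", + "\n", + "toy_text = \"\\n\\n\".join(f\"Paragraph {i}: \" + \"word \" * 20 for i in range(4))\n", + "splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)\n", + "\n", + "for chunk in splitter.split_text(toy_text):\n", + "    print(len(chunk), repr(chunk[:40]))  # chunk lengths stay <= 100 characters\n", + "```"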
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c389f2b4-9bde-48b7-a87b-eee356c0fae1", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def process_docs(docs: list, chunk_size: int, chunk_overlap: int) -> list:\n", + "    \"\"\"Split the documents into overlapping chunks.\"\"\"\n", + "    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n", + "    texts = text_splitter.split_documents(docs)\n", + "    return texts\n", + "\n", + "texts = process_docs(docs, chunk_size=500, chunk_overlap=100)" + ] + }, + { + "cell_type": "markdown", + "id": "aca6e838-1746-4100-bd22-30e03b36c3e5", + "metadata": { + "tags": [] + }, + "source": [ + "# Generating Embeddings & Storing them in Chroma\n", + "\n", + "In this section of the Notebook, you use the embeddings model to transform your documents into semantically\n", + "meaningful vectors.\n", + "\n", + "By leveraging this model and the Chroma database interface provided by LangChain, you can embed your documents into\n", + "a latent space and subsequently store the results in a Vector Store. At this step, the model processes batches of documents\n", + "in order, so it may take a while to complete." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1d51f4b2-ff73-4bb7-a04c-bd1612db0208", + "metadata": {}, + "outputs": [], + "source": [ + "embeddings = EmbeddingsModel()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e7c988d-dc14-48da-ba95-7232142f81b4", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "db = Chroma(embedding_function=embeddings, persist_directory=f\"{os.getcwd()}/db\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a5478dcc-9ba9-444e-9c0d-1373ece594b7", + "metadata": {}, + "outputs": [], + "source": [ + "batch_size = 100  # Documents to process simultaneously\n", + "\n", + "for i in tqdm(range(0, len(texts), batch_size), desc=\"Processing Batches\"):\n", + "    batch = texts[i:i+batch_size]\n", + "    db.add_documents(batch)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "02faf0fb-f000-4696-a651-b621527621e9", + "metadata": {}, + "outputs": [], + "source": [ + "db.persist()" + ] + }, + { + "cell_type": "markdown", + "id": "a24f5bcc-c6ee-416c-8893-b30a69f2f054", + "metadata": { + "tags": [] + }, + "source": [ + "Finally, you can test the accuracy of the document retrieval mechanism by providing a simple query. Chroma will return\n", + "the four most similar documents by default." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a5699cd9-93b3-4513-8cb6-2d492ff5a06f", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "query = \"How can I get started with HPE Ezmeral Unified Analytics?\"\n", + "matches = db.similarity_search(query); matches" + ] + }, + { + "cell_type": "markdown", + "id": "83cb1b93-567e-4c2e-b3f9-48a3b1d43fc2", + "metadata": { + "tags": [] + }, + "source": [ + "# Conclusion and Next Steps\n", + "\n", + "Congratulations! You have successfully embedded your documents into a high-dimensional latent space\n", + "and stored these embeddings in a Vector Store. By accomplishing this, you've transformed unstructured text data into a\n", + "structured form that can power a robust question-answering system.\n", + "\n", + "However, your journey doesn't end here. 
Now that you have the Vector Store ready, the next step is to create an\n", + "Inference Service (ISVC) that can leverage this store to provide context to user queries. For this, you use KServe, a\n", + "flexible, cloud-native platform for serving Machine Learning models.\n", + "\n", + "In the next Notebook, you will configure two ISVCs: a custom ISVC using KServe to deploy the Chroma Database, and\n", + "another one backed by the Triton Inference Server to serve the embeddings model." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/demos/rag-demos/question-answering-gpu/02.serve-vectorstore.ipynb b/demos/rag-demos/question-answering-gpu/02.serve-vectorstore.ipynb new file mode 100644 index 00000000..392ac05d --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/02.serve-vectorstore.ipynb @@ -0,0 +1,486 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "48b72df6-4961-4128-9281-6c9634dd33fa", + "metadata": { + "tags": [] + }, + "source": [ + "# Creating an Inference Service using MLflow and KServe\n", + "\n", + "Welcome to part two of the tutorial on building a question-answering application over a private document corpus with\n", + "Large Language Models (LLMs). In the previous Notebook, you embedded the documents into a high-dimensional latent\n", + "space using a fine-tuned BGE-M3 model and saved these embeddings in a Vector Store using the Chroma database interface\n", + "from LangChain.\n", + "\n", + "
\n", + " \"isvc\"\n", + "
\n", + " Photo by Growtika on Unsplash\n", + "
\n", + "
\n", + "\n", + "In this Notebook, you delve deeper. You use MLflow to log the Chroma DB files as experiment artifacts. Once logged, you\n", + "set up an Inference Service (ISVC) that fetches these artifacts and leverages them to provide context to user\n", + "inquiries. For this task, you work with KServe, a Kubernetes-centric platform that offers a serverless blueprint for\n", + "scaling Machine Learning (ML) models seamlessly.\n", + "\n", + "A crucial point to remember: KServe doesn't support Chroma DB files natively. Because of this, you integrate a custom\n", + "predictor component. This involves creating a Docker image, which then serves as your ISVC endpoint. This approach\n", + "grants you a high level of customization, ensuring the service fits your requirements. You can find the necessary code\n", + "and the Dockerfile for this custom predictor in the `dockerfiles/vectorstore` directory. But for a quicker setup,\n", + "there's a pre-built option available: `dpoulopoulos/qna-vectorstore-mlde:v0.1.0`.\n", + "\n", + "Lastly, you must also deploy the embeddings model. You can accomplish this using KServe with the Triton Inference Service\n", + "backend. Triton requires the `model-repository` directory to be organized in a specific manner, which we will discuss in\n", + "more detail later.\n", + "\n", + "## Table of Contents\n", + "\n", + "1. [Logging the Vector Store as an Artifact](#logging-the-vector-store-as-an-artifact)\n", + "1. [Creating and Submitting the Inference Service](#creating-and-submitting-the-inference-service)\n", + "1. [Conclusion and Next Steps](#conclusion-and-next-steps)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e22867e3-a69c-488a-819e-cced462be9e3", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import os\n", + "import base64\n", + "import getpass\n", + "import requests\n", + "import subprocess\n", + "import mlflow\n", + "import ipywidgets as widgets\n", + "\n", + "from IPython.display import display" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7656fd38-0660-402b-bd83-72e566cd4e0f", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "def encode_base64(message: str):\n", + " encoded_bytes = base64.b64encode(message.encode('ASCII'))\n", + " return encoded_bytes.decode('ASCII')" + ] + }, + { + "cell_type": "markdown", + "id": "36a8b07a-15d8-44a2-89c4-266ee61d1a1e", + "metadata": { + "tags": [] + }, + "source": [ + "# Logging the Vector Store as an Artifact\n", + "\n", + "To begin, you create a new experiment or use an existing one and log the Chroma DB files as an artifact of this\n", + "experiment. Ultimately, you retrieve the URI that points to this artifact's location and provide it to the custom\n", + "predictor component. By doing this, the custom predictor component knows how to fetch the artifact and serve it\n", + "effectively." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "27118610-6f87-4bae-ac1d-efbf0b24b6ae", + "metadata": {}, + "outputs": [], + "source": [ + "# Add heading\n", + "heading = widgets.HTML(\"

MLflow Credentials

\")\n", + "display(heading)\n", + "\n", + "domain_input = widgets.Text(description='Username:', placeholder=\"i001ua.tryezmeral.com\")\n", + "username_input = widgets.Text(description='Username:')\n", + "password_input = widgets.Password(description='Password:')\n", + "submit_button = widgets.Button(description='Submit')\n", + "success_message = widgets.Output()\n", + "\n", + "domain = None\n", + "mlflow_username = None\n", + "mlflow_password = None\n", + "\n", + "def submit_button_clicked(b):\n", + " global domain, mlflow_username, mlflow_password\n", + " domain = domain_input.value\n", + " mlflow_username = username_input.value\n", + " mlflow_password = password_input.value\n", + " with success_message:\n", + " success_message.clear_output()\n", + " print(\"Credentials submitted successfully!\")\n", + " submit_button.disabled = True\n", + "\n", + "submit_button.on_click(submit_button_clicked)\n", + "\n", + "# Set margin on the submit button\n", + "submit_button.layout.margin = '20px 0 20px 0'\n", + "\n", + "# Display inputs and button\n", + "display(domain_input, username_input, password_input, submit_button, success_message)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b03cb65-26ac-4ec0-b88b-18bc3aee9152", + "metadata": {}, + "outputs": [], + "source": [ + "token_url = f\"https://keycloak.{domain}/realms/UA/protocol/openid-connect/token\"\n", + "\n", + "data = {\n", + " \"username\" : mlflow_username,\n", + " \"password\" : mlflow_password,\n", + " \"grant_type\" : \"password\",\n", + " \"client_id\" : \"ua-grant\",\n", + "}\n", + "\n", + "token_responce = requests.post(token_url, data=data, allow_redirects=True, verify=False)\n", + "\n", + "token = token_responce.json()[\"access_token\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b59a05cb-1e9a-47c8-bf85-f169a0ccff84", + "metadata": {}, + "outputs": [], + "source": [ + "os.environ['MLFLOW_TRACKING_TOKEN'] = token\n", + "os.environ[\"AWS_ACCESS_KEY_ID\"] = os.environ['MLFLOW_TRACKING_TOKEN']\n", + "os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"s3\"\n", + "os.environ[\"AWS_ENDPOINT_URL\"] = 'http://local-s3-service.ezdata-system.svc.cluster.local:30000'\n", + "os.environ[\"MLFLOW_S3_ENDPOINT_URL\"] = os.environ[\"AWS_ENDPOINT_URL\"]\n", + "os.environ[\"MLFLOW_S3_IGNORE_TLS\"] = \"true\"\n", + "os.environ[\"MLFLOW_TRACKING_INSECURE_TLS\"] = \"true\"\n", + "os.environ[\"MLFLOW_TRACKING_URI\"] = \"http://mlflow.mlflow.svc.cluster.local:5000\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e23d780-57e0-4d5c-a699-aa3046a5b586", + "metadata": {}, + "outputs": [], + "source": [ + "def get_or_create_experiment(exp_name):\n", + " \"\"\"Register an experiment in MLFlow.\n", + " \n", + " args:\n", + " exp_name (str): The name of the experiment.\n", + " \"\"\"\n", + " try:\n", + " mlflow.set_experiment(exp_name)\n", + " except Exception as e:\n", + " raise RuntimeError(f\"Failed to set the experiment: {e}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "439f97e2-4c39-4464-b3e8-c778b96d28da", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Create a new MLFlow experiment or re-use an existing one\n", + "get_or_create_experiment('mlde')\n", + "\n", + "# Log the Chroma DB files as an artifact of the experiment\n", + "mlflow.log_artifact(f\"{os.getcwd()}/db\")\n", + "\n", + "# Retrieve the URI of the artifact\n", + "uri = mlflow.get_artifact_uri(\"db\")" + ] + }, + { + "cell_type": "markdown", + "id": 
"b5f6da2b-a621-4837-b202-9a6f854d3999", + "metadata": {}, + "source": [ + "# Creating the Triton Model Repository\n", + "\n", + "Next, you will deploy the embeddings model using KServe alongside the Triton Inference Server backend. For this purpose, organizing the `model-repository` directory is essential, as Triton relies on it to load and deploy your models.\n", + "\n", + "The fundamental layout of this directory is pre-arranged for you. You can explore its contents by delving into the `model-repository` directory. Beyond that, your task is simply to relocate the model to a designated spot and replicate the entire `model-repository` directory onto the shared Persistent Volume Claim (PVC). The pod running the Inference Service will then mount this PVC and automatically retrieve the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "38c7648a-30a8-4ec9-90aa-c0186fbd5972", + "metadata": {}, + "outputs": [], + "source": [ + "!mv bge-m3 model-repository/bge/1/bge-m3" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d9d5e099-45ca-4ee9-9a99-be6ed5832d72", + "metadata": {}, + "outputs": [], + "source": [ + "!cp -r model-repository/ /mnt/shared/" + ] + }, + { + "cell_type": "markdown", + "id": "534dcf6e-58ca-4905-a600-a2ca98e4d32a", + "metadata": {}, + "source": [ + "Finally, you are ready to define the Inference Service CR:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b66c75e5-8be7-41bf-b2b4-96391cec61f6", + "metadata": {}, + "outputs": [], + "source": [ + "sgpt_isvc = \"\"\"\n", + "apiVersion: \"serving.kserve.io/v1beta1\"\n", + "kind: \"InferenceService\"\n", + "metadata:\n", + " name: \"bge\"\n", + "spec:\n", + " predictor:\n", + " timeout: 600\n", + " triton:\n", + " image: dpoulopoulos/triton-inference-server:v0.1.0\n", + " securityContext:\n", + " runAsUser: 0\n", + " resources:\n", + " limits:\n", + " cpu: \"2\"\n", + " memory: 8Gi\n", + " nvidia.com/gpu: 1\n", + " requests:\n", + " cpu: \"2\"\n", + " memory: 8Gi\n", + " storageUri: \"pvc://kubeflow-shared-pvc/model-repository\"\n", + "\"\"\"\n", + "\n", + "with open(\"sgpt-isvc.yaml\", \"w\") as f:\n", + " f.write(sgpt_isvc)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0396b3ca-afe6-4476-935c-269ddd40b155", + "metadata": {}, + "outputs": [], + "source": [ + "subprocess.run([\"kubectl\", \"apply\", \"-f\", \"sgpt-isvc.yaml\"])" + ] + }, + { + "cell_type": "markdown", + "id": "23c25e4d-4803-4464-80d9-6b62ad2b4d49", + "metadata": { + "tags": [] + }, + "source": [ + "# Creating and Submitting the Inference Service\n", + "\n", + "In the final segment of this Notebook, you create and submit an ISVC via a YAML template and a Python subprocess. This\n", + "process unfolds as follows:\n", + "\n", + "1. Drafting the YAML Template: Here, you craft a YAML file that outlines the ISVC's specifics. This captures elements\n", + " like the service's name, the chosen Docker image, and additional configurations. After drafting, you save this YAML\n", + " to a file for inspection and later submission.\n", + "1. Applying the YAML Template: With your YAML template prepped, the next step is to present it to KServe for deployment.\n", + " You accomplish this by leveraging a Python subprocess to execute a shell command.\n", + "\n", + "By the end of this section, you will have a running ISVC that is ready to receive user queries and provide context for\n", + "answering them using the Vector Store. 
This marks the completion of your journey, from transforming unstructured text\n", + "data into structured vector embeddings, to creating a scalable service that can provide context based on those\n", + "embeddings.\n", + "\n", + "In the upcoming cell, input the name of the Docker image you constructed in the initial phase. If you wish to utilize\n", + "the pre-fabricated one, simply leave the field untouched:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6fd164d7-7bd4-4ae7-b4c2-9e99f1a5f069", + "metadata": {}, + "outputs": [], + "source": [ + "# Add heading\n", + "heading = widgets.HTML(\"

Predictor Image

\")\n", + "display(heading)\n", + "\n", + "predictor_image_widget = widgets.Text(\n", + " description=\"Image Name:\",\n", + " placeholder=\"Default: dpoulopoulos/qna-vectorstore-mlde:v0.1.0\",\n", + " layout=widgets.Layout(width='30%'))\n", + "submit_button = widgets.Button(description=\"Submit\")\n", + "success_message = widgets.Output()\n", + "\n", + "predictor_image = None\n", + "\n", + "def submit_button_clicked(b):\n", + " global predictor_image\n", + " predictor_image = predictor_image_widget.value\n", + " with success_message:\n", + " success_message.clear_output()\n", + " if not predictor_image:\n", + " predictor_image = \"dpoulopoulos/qna-vectorstore-mlde:v0.1.0\"\n", + " print(f\"The name of the predictor image will be: '{predictor_image}'\")\n", + " submit_button.disabled = True\n", + "\n", + "submit_button.on_click(submit_button_clicked)\n", + "\n", + "# Set margin on the submit button\n", + "submit_button.layout.margin = '20px 0 20px 0'\n", + "\n", + "# Display inputs and button\n", + "display(predictor_image_widget, submit_button, success_message)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e05d4c9d-72e4-491a-ad99-f6f533f0ef94", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "isvc = \"\"\"\n", + "apiVersion: v1\n", + "kind: Secret\n", + "metadata:\n", + " name: minio-secret\n", + "type: Opaque\n", + "data:\n", + " MINIO_ACCESS_KEY: {0}\n", + " MINIO_SECRET_KEY: {1}\n", + "\n", + "---\n", + "apiVersion: serving.kserve.io/v1beta1\n", + "kind: InferenceService\n", + "metadata:\n", + " name: vectorstore\n", + "spec:\n", + " predictor:\n", + " containers:\n", + " - name: kserve-container\n", + " image: {2}\n", + " imagePullPolicy: Always\n", + " resources:\n", + " requests:\n", + " memory: \"2Gi\"\n", + " cpu: \"500m\"\n", + " limits:\n", + " memory: \"2Gi\"\n", + " cpu: \"500m\"\n", + " args:\n", + " - --persist-uri\n", + " - {3}\n", + " env:\n", + " - name: MLFLOW_S3_ENDPOINT_URL\n", + " value: {4}\n", + " - name: TRANSFORMERS_CACHE\n", + " value: /src\n", + " - name: SENTENCE_TRANSFORMERS_HOME\n", + " value: /src\n", + " - name: MINIO_ACCESS_KEY\n", + " valueFrom:\n", + " secretKeyRef:\n", + " key: MINIO_ACCESS_KEY\n", + " name: minio-secret\n", + " - name: MINIO_SECRET_KEY\n", + " valueFrom:\n", + " secretKeyRef:\n", + " key: MINIO_SECRET_KEY\n", + " name: minio-secret\n", + "\"\"\".format(encode_base64(os.environ[\"AWS_ACCESS_KEY_ID\"]),\n", + " encode_base64(os.environ[\"AWS_SECRET_ACCESS_KEY\"]),\n", + " predictor_image,\n", + " uri,\n", + " os.environ[\"MLFLOW_S3_ENDPOINT_URL\"])\n", + "\n", + "with open(\"vectorstore-isvc.yaml\", \"w\") as f:\n", + " f.write(isvc)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "68631d06-ea76-4159-a08b-57e76850ff56", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "subprocess.run([\"kubectl\", \"apply\", \"-f\", \"vectorstore-isvc.yaml\"])" + ] + }, + { + "cell_type": "markdown", + "id": "bc4e2d08-8e09-4c9e-826c-0f0dfdc2d3f3", + "metadata": { + "tags": [] + }, + "source": [ + "# Conclusion and Next Steps\n", + "\n", + "Congratulations! You've successfully navigated through the process of logging the Chroma DB files as artifacts using\n", + "MLflow, creating a custom Docker image, and setting up an ISVC with KServe that retrieves these artifacts to serve your\n", + "Vector Store. 
This ISVC forms the backbone of your question-answering application, enabling you to efficiently answer\n", + "queries based on the document embeddings you generated previously.\n", + "\n", + "From here, there are two paths you can choose:\n", + "\n", + "- **Testing the Vector Store ISVC**: If you'd like to test the Vector Store ISVC that you've just created, you can proceed\n", + "  to the third (optional) Notebook. This Notebook provides a step-by-step guide on how to invoke the ISVC and validate\n", + "  its performance.\n", + "- **Creating the LLM ISVC**: Alternatively, if you're ready to move on to the next stage of the project, you\n", + "  can jump straight to our fourth Notebook. In this Notebook, you create an ISVC for the Large Language Model (LLM),\n", + "  which will work in conjunction with the Vector Store ISVC to provide answers to user queries." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/demos/rag-demos/question-answering-gpu/03.document-prediction.ipynb b/demos/rag-demos/question-answering-gpu/03.document-prediction.ipynb new file mode 100644 index 00000000..2a520bdf --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/03.document-prediction.ipynb @@ -0,0 +1,225 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "972db2df-307f-4492-80c6-e84082d778f2", + "metadata": { + "tags": [] + }, + "source": [ + "# Invoking and Testing the Vector Store Inference Service (Optional)\n", + "\n", + "Welcome to the third part of the tutorial series on building a question-answering application over a corpus of private\n", + "documents using Large Language Models (LLMs). In the previous Notebooks, you've transformed unstructured text data into\n", + "structured vector embeddings, stored them in a Vector Store, deployed an Inference Service (ISVC) to serve the Vector Store,\n", + "and deployed the fine-tuned embeddings model using KServe and Triton.\n", + "\n", + "In this Notebook, you focus on invoking the Vector Store ISVC you've created and testing its performance. This\n", + "is an essential step, as it allows you to verify the functionality of your service and observe how it performs in\n", + "practice. Throughout this Notebook, you construct suitable requests, communicate with the service, and interpret the\n", + "responses.\n", + "\n", + "By the end of this Notebook, you will gain practical insights into the workings of the Vector Store ISVC and will be\n", + "well-prepared to integrate it into a larger system, alongside the LLM ISVC that you create in the subsequent Notebook.\n", + "\n", + "## Table of Contents\n", + "\n", + "1. [Invoke the Inference Service](#invoke-the-inference-service)\n", + "1. 
[Conclusion and Next Steps](#conclusion-and-next-steps)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "428fd850-d35a-476f-ba05-b11763ddec68", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import os\n", + "import json\n", + "import getpass\n", + "import requests\n", + "import ipywidgets as widgets\n", + "\n", + "from IPython.display import display" + ] + }, + { + "cell_type": "markdown", + "id": "f1f8bb43-af00-4dae-bf22-dec236bcafe7", + "metadata": { + "tags": [] + }, + "source": [ + "# Invoke the Inference Service\n", + "\n", + "First, you need to construct the URL you use in the POST request. For this example, you use the V1 inference protocol,\n", + "described below:\n", + "\n", + "| API | Verb | Path | Request Payload | Response Payload |\n", + "|--------------|------|-------------------------------|-------------------|-----------------------------------|\n", + "| List Models | GET | /v1/models | | {\"models\": []} |\n", + "| Model Ready | GET | /v1/models/ | | {\"name\": ,\"ready\": $bool} |\n", + "| Predict | POST | /v1/models/:predict | {\"instances\": []}* | {\"predictions\": []} |\n", + "| Explain | POST | /v1/models/:explain | {\"instances\": []}* | {\"predictions\": [], \"explanations\": []} |\n", + "\n", + "\\* Payload is optional\n", + "\n", + "You want to invoke the `predict` API. So let's use a simple query to test the service:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e060904d-a7b8-4835-99d6-f9890b6afbf9", + "metadata": {}, + "outputs": [], + "source": [ + "# Add heading\n", + "heading = widgets.HTML(\"

Credentials

\")\n", + "display(heading)\n", + "\n", + "domain_input = widgets.Text(description='Username:', placeholder=\"i001ua.tryezmeral.com\")\n", + "username_input = widgets.Text(description='Username:')\n", + "password_input = widgets.Password(description='Password:')\n", + "submit_button = widgets.Button(description='Submit')\n", + "success_message = widgets.Output()\n", + "\n", + "domain = None\n", + "username = None\n", + "password = None\n", + "\n", + "def submit_button_clicked(b):\n", + " global domain, username, password\n", + " domain = domain_input.value\n", + " username = username_input.value\n", + " password = password_input.value\n", + " with success_message:\n", + " success_message.clear_output()\n", + " print(\"Credentials submitted successfully!\")\n", + " submit_button.disabled = True\n", + "\n", + "submit_button.on_click(submit_button_clicked)\n", + "\n", + "# Set margin on the submit button\n", + "submit_button.layout.margin = '20px 0 20px 0'\n", + "\n", + "# Display inputs and button\n", + "display(domain_input, username_input, password_input, submit_button, success_message)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4c546186-ac65-457b-b4fd-bb86767cc3d4", + "metadata": {}, + "outputs": [], + "source": [ + "token_url = f\"https://keycloak.{domain}/realms/UA/protocol/openid-connect/token\"\n", + "\n", + "data = {\n", + " \"username\" : username,\n", + " \"password\" : password,\n", + " \"grant_type\" : \"password\",\n", + " \"client_id\" : \"ua-grant\",\n", + "}\n", + "\n", + "token_responce = requests.post(token_url, data=data, allow_redirects=True, verify=False)\n", + "\n", + "token = token_responce.json()[\"access_token\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "173e2ebd-5e3b-4289-8358-9406ba816921", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "DOMAIN_NAME = \"svc.cluster.local\"\n", + "NAMESPACE = \"bob\"\n", + "DEPLOYMENT_NAME = \"vectorstore-predictor\"\n", + "MODEL_NAME = \"vectorstore\"\n", + "SVC = f'{DEPLOYMENT_NAME}.{NAMESPACE}.{DOMAIN_NAME}'\n", + "URL = f\"https://{SVC}/v1/models/{MODEL_NAME}:predict\"\n", + "\n", + "print(URL)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "78da091c-9fce-4f91-8382-e5c785bdf24f", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "data = {\n", + " \"instances\": [{\n", + " \"input\": \"How can I get started with HPE Ezmeral Unified Anaytics?\",\n", + " \"num_docs\": 4 # number of documents to retrieve\n", + " }]\n", + "}\n", + "\n", + "headers = {\"Authorization\": f\"Bearer {token}\"}\n", + "\n", + "response = requests.post(URL, json=data, headers=headers, verify=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "461fdac2-cacb-40cc-bf2d-d1548072bb90", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "result = json.loads(response.text)[\"predictions\"]; result" + ] + }, + { + "cell_type": "markdown", + "id": "0e6c9e0e-6d17-4d15-ba4e-e353cc1cd3c2", + "metadata": { + "tags": [] + }, + "source": [ + "# Conclusion and Next Steps\n", + "\n", + "Well done! Through this Notebook, you've successfully interacted with and tested the Vector Store ISVC. You've learned\n", + "how to construct and send requests to the service and how to interpret the responses. 
This hands-on experience is\n", + "crucial as it provides a practical understanding of the service's operation, preparing you for real-world applications.\n", + "\n", + "In the next Notebook, you extend your question-answering system by creating an ISVC for the LLM. The LLM ISVC works in\n", + "conjunction with the Vector Store ISVC to provide comprehensive and accurate answers to user queries." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/demos/rag-demos/question-answering-gpu/04.serve-llm.ipynb b/demos/rag-demos/question-answering-gpu/04.serve-llm.ipynb new file mode 100644 index 00000000..7f896c48 --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/04.serve-llm.ipynb @@ -0,0 +1,355 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f891729d-9030-4d9e-a7be-e81386ae820f", + "metadata": { + "tags": [] + }, + "source": [ + "# Creating a Large Language Model Inference Service\n", + "\n", + "Welcome to the fourth part of the tutorial series on building a question-answering application over a corpus of private\n", + "documents using Large Language Models (LLMs). The previous Notebooks walked you through the processes of creating\n", + "vector embeddings of the documents, setting up the Inference Services (ISVCs) for the Vector Store and the Embeddings model,\n", + "and testing the performance of the information retrieval system.\n", + "\n", + "
\n", + " \"llm\"\n", + "
\n", + " Photo by Google DeepMind on Unsplash\n", + "
\n", + "
\n", + "\n", + "Now, you're moving towards the next crucial step: creating an ISVC for the LLM. This ISVC is the centerpiece of the\n", + "question-answering system, working in tandem with the Vector Store ISVC to deliver comprehensive and accurate answers to\n", + "user queries.\n", + "\n", + "In this Notebook, you set up this LLM ISVC. You learn how to build a Docker image for the transformer component and its\n", + "role, define a KServe ISVC YAML file, and deploy the service. By the end of this Notebook, you'll have a fully functioning\n", + "LLM ISVC that can accept user queries, interact with the Vector Store, and provide insightful responses.\n", + "\n", + "To complete this step you need to have access to the Llama 2 model on Hugging Face Hub and the pre-built TensorRT-LLM engines:\n", + "\n", + "* The TensorRT-LLM engines can be downloaded from the following link: https://ezmeral-artifacts.s3.us-east-2.amazonaws.com/llama-engines.tar.gz.\n", + " We will do this later in the Notebook.\n", + "* The model repository is available via the Hugging Face hub at https://huggingface.co/meta-llama/Llama-2-7b-chat-hf.\n", + " It is not necessary to clone the model weights, thus eliminating the need for Git LFS. You should sign in to Hugging Face and accept the terms\n", + " and conditions for using this model.\n", + "\n", + "## Table of Contents\n", + "\n", + "1. [Architecture](#architecture)\n", + "1. [Creating the Inference Service](#creating-the-inference-service)\n", + "1. [Conclusion and Next Steps](#conclusion-and-next-steps)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e22867e3-a69c-488a-819e-cced462be9e3", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import os\n", + "import subprocess\n", + "import ipywidgets as widgets\n", + "\n", + "from IPython.display import display" + ] + }, + { + "cell_type": "markdown", + "id": "83e0cc08-1c00-49cc-85bd-b792383b26e7", + "metadata": { + "tags": [] + }, + "source": [ + "# Architecture\n", + "\n", + "In this setup, an additional component, called a \"transformer\", plays a pivotal role in processing user queries and\n", + "integrating the Vector Store ISVC with the LLM ISVC. The transformer's role is to intercept the user's request, extract\n", + "the necessary information, and then communicate with the Vector Store ISVC to retrieve the relevant context. The\n", + "transformer then takes the response of the Vector Store ISVC (i.e., the context), combines it with the user's query, and\n", + "forwards the enriched prompt to the LLM predictor.\n", + "\n", + "Here's a detailed look at the process:\n", + "\n", + "1. **Intercepting the User's Request**: The transformer acts as a gateway between the user and the LLM ISVC. When a user\n", + " sends a query, it first reaches the transformer. The transformer extracts the query from the request.\n", + "1. **Communicating with the Vector Store ISVC**: The transformer then takes the user's query and sends a POST request to the\n", + " Vector Store ISVC including the user's query in the payload, just like you did in the previous Notebook.\n", + "1. **Receiving and Processing the Context**: The Vector Store ISVC responds by sending back the relevant context.\n", + "1. **Combining the Context with the User's Query**: The transformer then combines the received context with the user's\n", + " original query using a prompt template. 
This creates an enriched prompt that contains both the user's original\n", + "   question and the relevant context from our documents.\n", + "1. **Forwarding the Enriched Query to the LLM Predictor**: Finally, the transformer forwards this enriched query to the LLM\n", + "   predictor. The predictor then processes this query and generates a response, which is sent back to the transformer.\n", + "   Steps 2 through 5 are transparent to the user.\n", + "1. **Final response**: The transformer returns the response to the user.\n", + "\n", + "As such, you should build one custom Docker image at this point for the transformer component. The\n", + "source code and the Dockerfile are provided in the corresponding folder: `dockerfiles/transformer`.\n", + "For your convenience, you can use the image we have pre-built for you: `dpoulopoulos/qna-transformer-mlde:v0.1.0`\n", + "\n", + "Once ready, proceed with the next steps." + ] + }, + { + "cell_type": "markdown", + "id": "7ab9f01f-bdcf-4a5b-a466-cb44005f4dab", + "metadata": {}, + "source": [ + "# Downloading the Artifacts\n", + "\n", + "Next, you should download the necessary artifacts for the model. The first step is to clone the model repository. Visit the Llama 2 7B page on Hugging Face Hub and clone the model in this directory, using the following command:\n", + "\n", + "```\n", + "GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/meta-llama/Llama-2-7b-chat-hf\n", + "```\n", + "\n", + "You should provide your username and the access token for your account. If you don't have an access token, you should create a new one. When everything's ready, run the following command to move the Llama 2 7B directory you pulled to the right location:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fa75f3a7-a403-486b-84b7-66c98e0c2d42", + "metadata": {}, + "outputs": [], + "source": [ + "!mv Llama-2-7b-chat-hf/ inflight_batcher_llm/preprocessing/1/" + ] + }, + { + "cell_type": "markdown", + "id": "ba2d445f-50f7-497b-a557-869f34f0871b", + "metadata": {}, + "source": [ + "Next, let's download the pre-built TensorRT-LLM engines:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c5f06b8c-1f9e-4eec-a18d-d934a763797b", + "metadata": {}, + "outputs": [], + "source": [ + "# If you are behind a proxy, do not forget to set your `https_proxy` and `HTTPS_PROXY` environment variables.\n", + "# os.environ[\"https_proxy\"] = \"\"\n", + "# os.environ[\"HTTPS_PROXY\"] = \"\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "40598e5c-2f3c-4ca5-a9af-f98492cd1df0", + "metadata": {}, + "outputs": [], + "source": [ + "!wget https://ezmeral-artifacts.s3.us-east-2.amazonaws.com/llama-engines.tar.gz # download the tarball" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "03d6d7f1-c567-4e82-b0fd-31d40844e662", + "metadata": {}, + "outputs": [], + "source": [ + "!mv llama-engines.tar.gz inflight_batcher_llm/tensorrt_llm/1/ # move the tarball to the right location" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4d3860bb-043d-4b92-9b5a-d2a306234011", + "metadata": {}, + "outputs": [], + "source": [ + "!tar xzf inflight_batcher_llm/tensorrt_llm/1/llama-engines.tar.gz -C inflight_batcher_llm/tensorrt_llm/1/ # extract the tarball" + ] + }, + { + "cell_type": "markdown", + "id": "c8b51ccb-f1d3-4132-a88b-62b84d90b11a", + "metadata": {}, + "source": [ + "# Creating the Inference Service\n", + "\n", + "As before, you need to provide the name of the transformer image. You can leave the 
field empty to use the image we\n", + "provide for you:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6db302a3-e35f-4bb3-a2c5-24ee637c2230", + "metadata": {}, + "outputs": [], + "source": [ + "# Add heading\n", + "heading = widgets.HTML(\"

Transformer Image

\")\n", + "display(heading)\n", + "\n", + "transformer_image_widget = widgets.Text(\n", + " description=\"Image Name:\",\n", + " placeholder=\"Default: dpoulopoulos/qna-transformer-mlde:v0.1.0\",\n", + " layout=widgets.Layout(width='30%'))\n", + "submit_button = widgets.Button(description=\"Submit\")\n", + "success_message = widgets.Output()\n", + "\n", + "transformer_image = None\n", + "\n", + "def submit_button_clicked(b):\n", + " global transformer_image\n", + " transformer_image = transformer_image_widget.value\n", + " with success_message:\n", + " success_message.clear_output()\n", + " if not transformer_image:\n", + " transformer_image = \"dpoulopoulos/qna-transformer-mlde:v0.1.0\"\n", + " print(f\"The name of the transformer image will be: '{transformer_image}'\")\n", + " submit_button.disabled = True\n", + "\n", + "submit_button.on_click(submit_button_clicked)\n", + "\n", + "# Set margin on the submit button\n", + "submit_button.layout.margin = '20px 0 20px 0'\n", + "\n", + "# Display inputs and button\n", + "display(transformer_image_widget, submit_button, success_message)" + ] + }, + { + "cell_type": "markdown", + "id": "bee3b154-ee58-4f55-b2b3-27c58a5b085c", + "metadata": {}, + "source": [ + "Copy the `inflight_batcher_llm` directory to the shared PVC. The `inflight_batcher_llm` directory defines the structure or the model repository that the Triton Inference Server expects to serve Llama 2 7B, using the TensorRT-LLM backend." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7cb8f88b-a7fd-4dd8-b6f7-e72428103528", + "metadata": {}, + "outputs": [], + "source": [ + "!cp -r inflight_batcher_llm/ /mnt/shared/" + ] + }, + { + "cell_type": "markdown", + "id": "f545db2b-c5a1-4dfd-a382-3b4d964e9d79", + "metadata": {}, + "source": [ + "Define and apply the LLM Inference Service:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e05d4c9d-72e4-491a-ad99-f6f533f0ef94", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "llama_isvc = \"\"\"\n", + "apiVersion: \"serving.kserve.io/v1beta1\"\n", + "kind: \"InferenceService\"\n", + "metadata:\n", + " name: \"ensemble\"\n", + "spec:\n", + " predictor:\n", + " timeout: 600\n", + " triton:\n", + " image: nvcr.io/nvidia/tritonserver:24.01-trtllm-python-py3\n", + " securityContext:\n", + " runAsUser: 0\n", + " resources:\n", + " limits:\n", + " cpu: \"4\"\n", + " memory: 32Gi\n", + " nvidia.com/gpu: 1\n", + " requests:\n", + " cpu: \"4\"\n", + " memory: 32Gi\n", + " storageUri: \"pvc://kubeflow-shared-pvc/inflight_batcher_llm\"\n", + " transformer:\n", + " timeout: 600\n", + " containers:\n", + " - image: {0}\n", + " imagePullPolicy: Always\n", + " resources:\n", + " requests:\n", + " memory: \"1Gi\"\n", + " cpu: \"500m\"\n", + " limits:\n", + " memory: \"1Gi\"\n", + " cpu: \"500m\"\n", + " name: kserve-container\n", + " args: [\"--protocol\", \"v2\"]\n", + "\"\"\".format(transformer_image)\n", + "\n", + "with open(\"llama-isvc.yaml\", \"w\") as f:\n", + " f.write(llama_isvc)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "68631d06-ea76-4159-a08b-57e76850ff56", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "subprocess.run([\"kubectl\", \"apply\", \"-f\", \"llama-isvc.yaml\"])" + ] + }, + { + "cell_type": "markdown", + "id": "20235af7-7b47-4b68-8f6d-f0b01b0c23e5", + "metadata": { + "tags": [] + }, + "source": [ + "# Conclusion and Next Steps\n", + "\n", + "Congratulations on completing this crucial step in this tutorial series! 
You've successfully built an LLM ISVC, and\n", + "you've learned about the role of a transformer in enriching user queries with relevant context from our documents.\n", + "Together with the Vector Store ISVC, these components form the backbone of your question-answering application.\n", + "\n", + "However, the journey doesn't stop here. The next and final step is to test the LLM ISVC, ensuring that it's working as\n", + "expected and delivering accurate responses. This will help you gain confidence in your setup and prepare you for\n", + "real-world applications. In the next Notebook, you invoke the LLM ISVC. You see how to construct suitable requests,\n", + "communicate with the service, and interpret the responses." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/demos/rag-demos/question-answering-gpu/05.question-answering.ipynb b/demos/rag-demos/question-answering-gpu/05.question-answering.ipynb new file mode 100644 index 00000000..e62059e6 --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/05.question-answering.ipynb @@ -0,0 +1,227 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "286558bc-9780-4399-ae75-8055b8e0a9d3", + "metadata": { + "tags": [] + }, + "source": [ + "# Invoking and Testing the Large Language Model Inference Service\n", + "\n", + "Welcome to the fifth and final part of the tutorial series on building a question-answering application over a corpus of\n", + "private documents using Large Language Models (LLMs). In the previous Notebooks, you've covered the processes of\n", + "creating vector embeddings of our documents, deploying a Vector Store Inference Service (ISVC), creating an LLM ISVC,\n", + "and enriching user queries with relevant context using an ISVC Transformer component.\n", + "\n", + "In this Notebook, you focus on the crucial task of invoking and testing the LLM ISVC you've created. This is an\n", + "important step in the development process as it allows you to validate the functionality and performance of your\n", + "service in a practical setting.\n", + "\n", + "Throughout this Notebook, you see how to construct and send requests to the LLM ISVC, interpret the responses, and\n", + "handle potential issues that might arise. By the end of this Notebook, you will have gained practical experience in\n", + "working with the LLM ISVC, preparing you to integrate it into larger systems or applications.\n", + "\n", + "## Table of Contents\n", + "\n", + "1. [Invoking the LLM Inference Service](#invoking-the-llm-inference-service)\n", + "1. [Conclusion](#conclusion)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "428fd850-d35a-476f-ba05-b11763ddec68", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import os\n", + "import json\n", + "import getpass\n", + "import requests\n", + "import ipywidgets as widgets\n", + "\n", + "from IPython.display import display" + ] + }, + { + "cell_type": "markdown", + "id": "dc23caa9-d988-4ae0-afd6-094a5a09c82a", + "metadata": { + "tags": [] + }, + "source": [ + "# Invoking the LLM Inference Service\n", + "\n", + "You are now ready to test your service. 
Provide your question and get back the answer from the LLM inference service." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9bf1f7a1-c9d9-4ffd-8cdb-801436a83229", + "metadata": {}, + "outputs": [], + "source": [ + "# Add heading\n", + "heading = widgets.HTML(\"

<h2>Credentials</h2>

\")\n",
+    "display(heading)\n",
+    "\n",
+    "domain_input = widgets.Text(description='Domain:', placeholder=\"i001ua.tryezmeral.com\")\n",
+    "username_input = widgets.Text(description='Username:')\n",
+    "password_input = widgets.Password(description='Password:')\n",
+    "submit_button = widgets.Button(description='Submit')\n",
+    "success_message = widgets.Output()\n",
+    "\n",
+    "domain = None\n",
+    "username = None\n",
+    "password = None\n",
+    "\n",
+    "def submit_button_clicked(b):\n",
+    "    global domain, username, password\n",
+    "    domain = domain_input.value\n",
+    "    username = username_input.value\n",
+    "    password = password_input.value\n",
+    "    with success_message:\n",
+    "        success_message.clear_output()\n",
+    "        print(\"Credentials submitted successfully!\")\n",
+    "    submit_button.disabled = True\n",
+    "\n",
+    "submit_button.on_click(submit_button_clicked)\n",
+    "\n",
+    "# Set margin on the submit button\n",
+    "submit_button.layout.margin = '20px 0 20px 0'\n",
+    "\n",
+    "# Display inputs and button\n",
+    "display(domain_input, username_input, password_input, submit_button, success_message)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "64c58e63-c0d8-419a-b393-15251cf09ac4",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "token_url = f\"https://keycloak.{domain}/realms/UA/protocol/openid-connect/token\"\n",
+    "\n",
+    "data = {\n",
+    "    \"username\" : username,\n",
+    "    \"password\" : password,\n",
+    "    \"grant_type\" : \"password\",\n",
+    "    \"client_id\" : \"ua-grant\",\n",
+    "}\n",
+    "\n",
+    "token_response = requests.post(token_url, data=data, allow_redirects=True, verify=False)\n",
+    "\n",
+    "token = token_response.json()[\"access_token\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "173e2ebd-5e3b-4289-8358-9406ba816921",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "DOMAIN_NAME = \"svc.cluster.local\"  # change this to your domain for external access\n",
+    "NAMESPACE = \"bob\"  # change this to your namespace\n",
+    "DEPLOYMENT_NAME = \"ensemble\"\n",
+    "MODEL_NAME = DEPLOYMENT_NAME\n",
+    "SVC = f'{DEPLOYMENT_NAME}-transformer.{NAMESPACE}.{DOMAIN_NAME}'\n",
+    "URL = f\"https://{SVC}/v1/models/{MODEL_NAME}:predict\"\n",
+    "\n",
+    "print(URL)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "78da091c-9fce-4f91-8382-e5c785bdf24f",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "data = {\n",
+    "    \"instances\": [{\n",
+    "        \"input\": \"What are the major deep learning frameworks that Determined is compatible with?\",\n",
+    "        \"max_tokens\": 100,\n",
+    "        \"top_k\": 40,\n",
+    "        \"top_p\": 0.4,\n",
+    "        \"num_docs\": 4,\n",
+    "        \"temperature\": 0.1\n",
+    "    }]\n",
+    "}\n",
+    "\n",
+    "headers = {\"Authorization\": f\"Bearer {token}\"}\n",
+    "\n",
+    "response = requests.post(URL, json=data, headers=headers, verify=False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "461fdac2-cacb-40cc-bf2d-d1548072bb90",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "# Extract the raw model output and strip the echoed [INST]...[/INST] prompt,\n",
+    "# keeping only the generated answer.\n",
+    "text = json.loads(response.text)[\"outputs\"][2][\"data\"][0]\n",
+    "\n",
+    "trimmed_text = text.split(\"[INST]\", 1)[-1].split(\"[/INST]\", 1)[0] if \"[INST]\" in text and \"[/INST]\" in text else text\n",
+    "result = text.replace(trimmed_text, \"\").replace(\"[INST]\", \"\").replace(\"[/INST]\", \"\").strip()\n",
+    "\n",
+    "print(result)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "501663ea-9a56-4fca-a63a-69fabe51ec34",
+   "metadata": {
+    "tags": []
+   },
+   "source": [
+    "# Conclusion\n",
+    "\n",
+    "Congratulations on 
reaching the finish line of this comprehensive tutorial! You've successfully developed an application\n",
+    "capable of delivering responses to user queries in a natural language format. The journey has not only enhanced your\n",
+    "understanding but also allowed you to acquire hands-on experience in various facets of LLMs.\n",
+    "\n",
+    "Throughout this process, you've demystified the concept of a Vector Store, created custom predictor and transformer\n",
+    "components, and learned to log artifacts with MLflow. Moreover, all these tasks have been accomplished within the\n",
+    "comfortable and familiar confines of your JupyterLab environment.\n",
+    "\n",
+    "In conclusion, you've taken significant strides in your journey of mastering LLMs and learning how to create real-world\n",
+    "applications using the EzUA platform. As a next step, you can follow the instructions in the README file and deploy the\n",
+    "front-end application of this service."
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/demos/rag-demos/question-answering-gpu/06.finetune-model.ipynb b/demos/rag-demos/question-answering-gpu/06.finetune-model.ipynb
new file mode 100644
index 00000000..7ccfc20e
--- /dev/null
+++ b/demos/rag-demos/question-answering-gpu/06.finetune-model.ipynb
@@ -0,0 +1,418 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Fine-tune the Embedding Model on MLDE\n",
+    "\n",
+    "### Dataset preparation\n",
+    "To make our Retrieval Augmented Generation (RAG) application more effective, we can fine-tune our embedding model on our dataset to make it better at retrieving the right chunks when we ask a question. The dataset we need to train it on would be pairs of questions and the chunk each question should help retrieve. We have our data in JSON format, so the first thing we need to do is generate questions from it. We're going to use LlamaIndex and OpenAI to generate the questions. We've also included the datasets pre-generated if you want to skip this part.\n",
+    "\n",
+    "Let's start by getting the content from each of the objects in our data. The next cells sketch the JSON layout this Notebook assumes."
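+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal, hypothetical sketch of that layout: each file in the `documents` folder is assumed to hold a JSON array of objects, each carrying its text under a `content` key. The file name and values below are made up."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Hypothetical sketch of a file such as ./documents/release-notes.json:\n",
+    "# a JSON array of objects whose \"content\" field holds the raw text.\n",
+    "example = [\n",
+    "    {\"content\": \"Release Date: September 25, 2023 Breaking Changes ...\"},\n",
+    "    {\"content\": \"Release Date: September 11, 2023 Breaking Changes ...\"}\n",
+    "]"
+   ]
+  },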
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install llama-index-finetuning"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "1825\n",
+      "['Release Date: September 25, 2023 Breaking Changes Kubernetes: Remove the agent_reattach_enabled config option. Agent reattach is now always enabled. Agent: Take the default value for the --visible-gpus option from the CUDA_VISIBLE_DEVICES or ROCR_VISIBLE_DEVICES environment variables, if defined. New Features SDK: Add the ability to keep track of what experiments use a particular checkpoint or model version for inference. SDK: Add Checkpoint.get_metrics and ModelVersion.get_metrics methods. Kubernetes: Support enabling and disabling agents to prevent Determined from scheduling jobs on specific nodes. Upgrading from a version before this feature to a version after this feature only on Kubernetes will cause queued allocations to be killed on upgrade. Users can pause queued experiments to avoid this. Improvements Enable reporting and display of metrics with floating-point epoch values. API: Allow the reporting of duplicate metrics across multiple report_metrics calls with the same steps_completed, provided they have identical values. SDK: stream_trials_training_metrics() and stream_trials_validation_metrics() are now deprecated. Please use stream_trials_metrics() instead. The corresponding methods of Determined and TrialReference have also been updated similarly. Bug Fixes Checkpoints: Fix an issue where in certain situations duplicate checkpoints with the same UUID would be returned by the WebUI and the CLI. Models: Fix a bug where det model describe and other methods in the CLI and SDK that act on a single model would error if two models had similar names. Workspaces: Fix an issue where notebooks, TensorBoards, shells, and commands would not inherit agent user group and agent user information from their workspace.', 'Release Date: September 11, 2023 Breaking Changes Fluent Bit is no longer used for log shipping and configs associated with Fluent Bit are now no longer in use. Fluent Bit has been replaced with an internal log shipper (the same one that is used for Slurm). Bug Fixes Reduce the time before seeing the first metrics of a new experiment.', 'Release Date: August 29, 2023 Breaking Changes Remove EstimatorTrial, which has been deprecated since Determined version 0.22.0 (May 2023). Bug Fixes Trials: Fix an issue where trial logs could fail for trials created prior to Determined version 0.17.0. CLI: Fix an issue where template association with workspaces, when listed, was missing. This would prevent templates from being listed for some users and templates on RBAC-enabled clusters.', 'Release Date: August 18, 2023 Breaking Changes API: Remove LightningAdapter, which was deprecated in 0.23.1 (June 2023). We recommend that PyTorch Lightning users migrate to the Core API. 
New Features Environments: Add experimental PyTorch 2.0 images containing PyTorch 2.0.1, Python 3.10.12, and (for the GPU image) CUDA 11.8. Bug Fixes Users: Fix an issue that caused the CLI command det user list to always show “false” in the “remote” column.', 'Release Date: July 31, 2023 Breaking Changes API: The /api/v1/users/setting endpoint no longer accepts storagePath and now accepts a settings array instead of a single setting. New Features Allow non-intersecting dictionaries of metrics to be merged on the same total_batches. This update was rejected before. API: Add a new patch API endpoint /api/v1/master/config that allows the user to make changes to the master config while the cluster is running. Currently, only changing the log config is supported. CLI: Add a new CLI command det master config --log --level --color that allows the user to change the log level and color settings of the master config while the cluster is still running. det master config can still be used to get the master config. Cluster: Allow binding resource pools to specific workspaces. Bound resource pools can only be used by the workspaces they are bound to. Each workspace can also now have a default compute resource pool and a default auxiliary resource pool configured. Kubernetes: Users may now populate all securityContext fields within the pod spec of the determined-container container except for RunAsUser and RunAsGroup. For those fields, use det user link-with-agent-user instead. WebUI: The experiment list page now has the following new capabilities: Select metrics and hyperparameters as columns. Filter the list on any available column. Specify complex filters. Sort the list on any available column. Display total number of experiments matching the filter. Compare metrics, hyperparameters, and trial details across experiments. Toggle between pagination and infinite scroll. Select preferred table density. Improvements WebUI: Improve performance and stability.']\n" + ] + } + ], + "source": [ + "import os\n", + "import json\n", + "\n", + "def extract_single_value_from_json_files(directory, key):\n", + " \"\"\"\n", + " Reads every JSON file in a directory and extracts a single value from each object based on the specified key.\n", + "\n", + " Args:\n", + " - directory (str): The directory path containing JSON files.\n", + " - key (str): The key to extract from each object.\n", + "\n", + " Returns:\n", + " - values_list (list): A list containing the extracted values from all JSON files.\n", + " \"\"\"\n", + " values_list = []\n", + " for filename in os.listdir(directory):\n", + " if filename.endswith('.json'):\n", + " file_path = os.path.join(directory, filename)\n", + " with open(file_path, 'r') as file:\n", + " try:\n", + " data = json.load(file)\n", + " for obj in data:\n", + " if key in obj and obj[key] is not None:\n", + " values_list.append(obj[key])\n", + " except json.JSONDecodeError:\n", + " print(f\"Error decoding JSON file: {file_path}\")\n", + " return values_list\n", + "\n", + "# Example usage:\n", + "directory_path = './documents'\n", + "key_to_extract = 'content'\n", + "result = extract_single_value_from_json_files(directory_path, key_to_extract)\n", + "print(len(result))\n", + "print(result[:5])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Ok, we've read in the content into a list. Now we'll want to parse them with a sentence splitter to build nodes." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/tylerbritten/Developer/HPE/ezua-tutorials/.direnv/python-3.10/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n", + "Parsing nodes: 100%|██████████| 1825/1825 [00:01<00:00, 917.15it/s] \n" + ] + } + ], + "source": [ + "from llama_index.core import Document\n", + "from llama_index.core.node_parser import SentenceSplitter\n", + "\n", + "node_parser = SentenceSplitter(chunk_size=1024, chunk_overlap=20)\n", + "\n", + "\n", + "text_list = result\n", + "documents = [Document(text=t) for t in text_list]\n", + "\n", + "nodes = node_parser.get_nodes_from_documents(documents,show_progress=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{}\n", + "Release Date: September 25, 2023 Breaking Changes Kubernetes: Remove the agent_reattach_enabled config option. Agent reattach is now always enabled. Agent: Take the default value for the --visible-gpus option from the CUDA_VISIBLE_DEVICES or ROCR_VISIBLE_DEVICES environment variables, if defined. New Features SDK: Add the ability to keep track of what experiments use a particular checkpoint or model version for inference. SDK: Add Checkpoint.get_metrics and ModelVersion.get_metrics methods. Kubernetes: Support enabling and disabling agents to prevent Determined from scheduling jobs on specific nodes. Upgrading from a version before this feature to a version after this feature only on Kubernetes will cause queued allocations to be killed on upgrade. Users can pause queued experiments to avoid this. Improvements Enable reporting and display of metrics with floating-point epoch values. API: Allow the reporting of duplicate metrics across multiple report_metrics calls with the same steps_completed, provided they have identical values. SDK: stream_trials_training_metrics() and stream_trials_validation_metrics() are now deprecated. Please use stream_trials_metrics() instead. The corresponding methods of Determined and TrialReference have also been updated similarly. Bug Fixes Checkpoints: Fix an issue where in certain situations duplicate checkpoints with the same UUID would be returned by the WebUI and the CLI. Models: Fix a bug where det model describe and other methods in the CLI and SDK that act on a single model would error if two models had similar names. Workspaces: Fix an issue where notebooks, TensorBoards, shells, and commands would not inherit agent user group and agent user information from their workspace.\n", + "--------\n", + "{}\n", + "Release Date: September 11, 2023 Breaking Changes Fluent Bit is no longer used for log shipping and configs associated with Fluent Bit are now no longer in use. Fluent Bit has been replaced with an internal log shipper (the same one that is used for Slurm). Bug Fixes Reduce the time before seeing the first metrics of a new experiment.\n" + ] + } + ], + "source": [ + "print(nodes[0].metadata)\n", + "print(nodes[0].text)\n", + "print(\"--------\")\n", + "print(nodes[1].metadata)\n", + "print(nodes[1].text)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Cool, now we have our text chunked up in a list of nodes. 
The next thing we're going to do is take a sample of the data. How about 250 each for training and validation."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "250 250\n"
+     ]
+    }
+   ],
+   "source": [
+    "import random\n",
+    "subset = random.sample(nodes, 500)\n",
+    "test, train = subset[:250], subset[250:]\n",
+    "\n",
+    "print(len(test), len(train))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Perfect, now we have 250 chunks randomly sampled from our data for training and 250 for validation. Let's use the OpenAI `gpt-3.5-turbo` model to generate questions for these chunks. After that, we'll store them in JSON to use for training. You can skip this part and use the existing JSON files in the `experiment/` folder instead. If you do decide to run it, replace the existing files in the experiment folder with the ones you generated."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.finetuning import generate_qa_embedding_pairs\n",
+    "from llama_index.core.evaluation import EmbeddingQAFinetuneDataset\n",
+    "import os\n",
+    "from llama_index.llms.openai import OpenAI\n",
+    "\n",
+    "os.environ[\"OPENAI_API_KEY\"] = \"your-api-key-here\"\n",
+    "\n",
+    "train_dataset = generate_qa_embedding_pairs(\n",
+    "    llm=OpenAI(model=\"gpt-3.5-turbo\"), nodes=train\n",
+    ")\n",
+    "test_dataset = generate_qa_embedding_pairs(\n",
+    "    llm=OpenAI(model=\"gpt-3.5-turbo\"), nodes=test\n",
+    ")\n",
+    "\n",
+    "train_dataset.save_json(\"demo_dataset.json\")\n",
+    "test_dataset.save_json(\"test_dataset.json\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Training on MLDE\n",
+    "\n",
+    "Now that we have our data, let's fine-tune a model on MLDE. We're going to use `BAAI/bge-m3`, but any of the `BAAI` `bge` models should work well enough.\n",
+    "We're going to send our experiment to MLDE. Make sure you have the Determined client installed (`pip install determined`) and that you're logged in (`det -m <master-url> auth login`).\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!det -m https://mlde.i006ua.tryezmeral.com:443 e create experiment/const.yaml ./experiment"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "python-3.10",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/demos/rag-demos/question-answering-gpu/README.md b/demos/rag-demos/question-answering-gpu/README.md
new file mode 100644
index 00000000..813dc2ce
--- /dev/null
+++ b/demos/rag-demos/question-answering-gpu/README.md
@@ -0,0 +1,149 @@
+# MLDE Demo (WiP)
+
+In this tutorial, you build a question-answering RAG (Retrieval Augmented Generation) system using
+an open-source Large Language Model (LLM) and a fine-tuned embeddings model. This system can answer
+questions from a corpus of private documentation. In this use case, we are using the Ezmeral
+Unified Analytics (EzUA) and the Ezmeral Data Fabric (EzDF) documentation as our dataset. 
+
+![llm-high-level](images/llm-figure.png)
+
+To make this happen, you:
+
+* Deploy an embeddings model, fine-tuned on a corpus of private documentation.
+* Use this model to build and deploy a Vector Store, designed to capture and index the latent
+  representation of each document effectively.
+* Deploy an LLM to respond to inquiries, leveraging the context extracted from the Vector Store, in
+  a natural language format.
+
+1. [What You'll Need](#what-youll-need)
+1. [Procedure](#procedure)
+1. [Troubleshooting](#troubleshooting)
+1. [How It Works](#how-it-works)
+1. [Clean Up](#clean-up)
+1. [References](#references)
+
+## What You'll Need
+
+For this tutorial, ensure you have:
+
+- Access to an HPE Ezmeral Unified Analytics (EzUA) cluster.
+- The fine-tuned [BGE-M3](https://ezmeral-artifacts.s3.us-east-2.amazonaws.com/bge-m3.tar.gz)
+  embeddings model this tutorial uses.
+- Access to the Llama 2 7B LLM, along with its compiled TensorRT-LLM engines. You can clone the
+  [Llama 2 7B](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) model from Hugging Face and
+  download the TensorRT-LLM engines from [HPE Ezmeral Artifacts](https://ezmeral-artifacts.s3.us-east-2.amazonaws.com/llama-engines.tar.gz).
+
+## Procedure
+
+To complete the tutorial follow the steps below:
+
+1. Login to your EzUA cluster, using your credentials.
+1. Create a new Notebook server using the `dpoulopoulos/jupyter:v1.7.0-dirty` image. Request at
+   least `8Gi` of memory, `4` CPUs, and `1` GPU device for the Notebook server.
+1. Connect to the Notebook server, launch a new terminal window, and clone the repository locally.
+   See the troubleshooting section if this step fails.
+1. Navigate to the tutorial's directory (`ezua-tutorials/demos/rag-demos/question-answering-gpu`)
+1. Install the required dependencies:
+   ```
+   pip install -r requirements.txt
+   ```
+1. Launch the five Notebooks in order and execute the code cells.
+1. Use the EzUA "Import Framework" wizard to upload the tarball located inside the `application`
+   folder. This creates a user interface for your application. Complete the steps and wait for a
+   new endpoint to become ready.
+1. Connect to the endpoint and submit your questions.
+
+## Troubleshooting
+
+If you're operating behind a proxy, you'll need to configure several environment variables to
+successfully clone the `ezua-tutorials` repository to your local machine, install its dependencies
+using `pip`, and download the necessary Machine Learning (ML) models from external sources.
+
+To clone the repository and install the necessary Python libraries using `pip`, launch a terminal
+window and execute the following commands:
+
+- `export http_proxy=`
+- `export https_proxy=`
+
+## How It Works
+
+This project taps into a BGE-M3 embeddings model [1] to translate the sentences and paragraphs from
+a private document corpus into a multi-dimensional dense vector space [2]. By doing so, you can
+employ sophisticated techniques like semantic search, empowering users to pinpoint relevant
+details swiftly and precisely. The generated vectors are stored and indexed within a local
+[Chroma](https://www.trychroma.com/) instance, a cloud-native, open-source embedding database [3].
+
+Once a question is provided, the application embeds it into the previously mentioned vector space
+using the same embedding model. By default, it fetches the top four relevant documents, utilizing a
+particular algorithm (most commonly kNN) [4]. 
Subsequently, the application relays the user's
+question along with the retrieved context to an LLM, ensuring the answer mirrors human-like speech.
+This tutorial uses the Llama 2 7B model, served with KServe. KServe uses the Triton Inference Server
+as its backend, which in turn uses the TensorRT-LLM backend; the latter provides several
+optimizations, such as PagedAttention (vLLM) [5] and FlashAttention [6].
+
+Now, onto the serving details: Using [KServe](https://kserve.github.io/website/0.11/), you can
+set up an Inference Service (ISVC) with custom components. To do this, you need to build and push
+two Docker images which KServe will use to serve both the Vector Store and the transformer component
+of the LLM ISVC. Below are the application directories containing the respective Dockerfiles
+to build these images:
+
+- Vector Store: [`dockerfiles/vectorstore`](dockerfiles/vectorstore)
+- LLM Transformer: [`dockerfiles/transformer`](dockerfiles/transformer)
+
+> For your convenience, you can use the pre-built images we have prepared for you:
+> - Vector Store: `dpoulopoulos/qna-vectorstore-mlde:v0.1.0`
+> - LLM Transformer: `dpoulopoulos/qna-transformer-mlde:v0.1.0`
+
+Once the images are ready, proceed to run the Notebooks. The project consists of five Notebooks.
+Launch and run each Notebook to explore and execute the experiment end-to-end:
+
+1. `01.create-vectorstore`: Load the documents from your private corpus (e.g., the `documents`
+   folder), process them, and create the Vector Store.
+1. `02.serve-vectorstore`: Create an ISVC for the Vector Store.
+1. `03.document-precition` (optional): Invoke the Vector Store ISVC.
+1. `04.serve-llm`: Create an ISVC for the LLM.
+1. `05.question-answering`: Invoke the LLM ISVC. Post a question to the LLM ISVC and get back a
+   human-like answer.
+
+The last Notebook outlines the user's perspective. The application flow is depicted in the
+following figure:
+
+![flow-chart](images/llm-flowchart.svg)
+
+1. User: Fine-tune an embeddings model on the private documentation.
+1. User: Push the fine-tuned model to a model registry.
+1. User: Serve the embeddings model using KServe and the Triton Inference Server.
+1. User: Use the embeddings model to transform the documents into numerical representations.
+1. User: Ingest document embeddings, documents data, and metadata into the Vector Store.
+1. User: Ask a new question.
+1. LLM ISVC Transformer: Intercept the request, extract the user's query, and create a new request
+   to the Vector Store ISVC predictor, passing the user's question in the payload.
+1. Vector Store ISVC Predictor: Extract the user's question from the request of the LLM ISVC
+   Transformer and create a new request to the embeddings model ISVC to encode the user's query.
+1. Embeddings model ISVC Predictor: Encode the user's query and return it to the Vector Store ISVC.
+1. Vector Store ISVC Predictor: Ask the Vector Store for the `k` most relevant documents.
+1. Vector Store: Retrieve the `k` most relevant documents.
+1. Vector Store: Respond to the LLM ISVC Predictor with a relevant context.
+1. LLM ISVC Transformer: Get the most relevant documents from the Vector Store ISVC predictor
+   response, and create a new request to the LLM ISVC predictor, passing the context and the
+   user's question.
+1. LLM ISVC Predictor: Extract the user's question as well as the context, and answer the user's
+   question based on the relevant context.
+1. LLM ISVC: Respond to the user with the completion prediction. 
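+
+For a quick smoke test of the deployed application, you can call the LLM ISVC directly with a few
+lines of Python. The sketch below assumes the names used throughout the Notebooks (the `ensemble`
+ISVC, its `-transformer` subdomain, and a namespace named `bob`) plus a Keycloak access token
+obtained as in `05.question-answering`; adjust these values to your environment:
+
+```
+# A minimal request sketch; the URL, namespace, and token below are assumptions.
+import requests
+
+token = "<your-keycloak-access-token>"  # obtained as in 05.question-answering
+URL = "https://ensemble-transformer.bob.svc.cluster.local/v1/models/ensemble:predict"
+
+payload = {
+    "instances": [{
+        "input": "What is EzPresto?",
+        "max_tokens": 100,   # maximum number of tokens to generate
+        "top_k": 40,         # sample from the 40 most likely tokens per step
+        "top_p": 0.4,        # nucleus sampling threshold
+        "num_docs": 4,       # documents to retrieve from the Vector Store
+        "temperature": 0.1,  # low temperature favors factual answers
+    }]
+}
+
+headers = {"Authorization": f"Bearer {token}"}
+response = requests.post(URL, json=payload, headers=headers, verify=False)
+print(response.json()["outputs"][2]["data"][0])
+```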
+ +## Clean Up + +To clean up the resources used during this experiment, follow the steps below: + +1. Go to the Kubeflow Endpoints UI and delete the ISVCs for the LLM model, the Vector Store, and the + embeddings model. +1. Go to the EzUA "Import Framework" dashboard and delete the front-end application. +1. Go into the project directory in the notebook server and delete the `db` directory which houses + the vector store artifacts. + +## References + +1. [BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation](https://arxiv.org/abs/2402.03216) +1. [A High-Level Introduction To Word Embeddings](https://predictivehacks.com/a-high-level-introduction-to-word-embeddings/) +1. [Chroma Database - The AI-native open-source embedding database](https://docs.trychroma.com/) +1. [Nearest Neighbor Indexes for Similarity Search](https://www.pinecone.io/learn/series/faiss/vector-indexes/) +1. [Efficient Memory Management for Large Language Model Serving with PagedAttention](https://arxiv.org/abs/2309.06180) +1. [FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness](https://arxiv.org/abs/2205.14135) diff --git a/demos/question-answering/application/helm/.helmignore b/demos/rag-demos/question-answering-gpu/application/helm/.helmignore similarity index 100% rename from demos/question-answering/application/helm/.helmignore rename to demos/rag-demos/question-answering-gpu/application/helm/.helmignore diff --git a/demos/question-answering/application/helm/Chart.yaml b/demos/rag-demos/question-answering-gpu/application/helm/Chart.yaml similarity index 100% rename from demos/question-answering/application/helm/Chart.yaml rename to demos/rag-demos/question-answering-gpu/application/helm/Chart.yaml diff --git a/demos/question-answering/application/helm/templates/NOTES.txt b/demos/rag-demos/question-answering-gpu/application/helm/templates/NOTES.txt similarity index 100% rename from demos/question-answering/application/helm/templates/NOTES.txt rename to demos/rag-demos/question-answering-gpu/application/helm/templates/NOTES.txt diff --git a/demos/question-answering/application/helm/templates/_helpers.tpl b/demos/rag-demos/question-answering-gpu/application/helm/templates/_helpers.tpl similarity index 100% rename from demos/question-answering/application/helm/templates/_helpers.tpl rename to demos/rag-demos/question-answering-gpu/application/helm/templates/_helpers.tpl diff --git a/demos/question-answering/application/helm/templates/authpolicy.yaml b/demos/rag-demos/question-answering-gpu/application/helm/templates/authpolicy.yaml similarity index 100% rename from demos/question-answering/application/helm/templates/authpolicy.yaml rename to demos/rag-demos/question-answering-gpu/application/helm/templates/authpolicy.yaml diff --git a/demos/question-answering/application/helm/templates/deployment.yaml b/demos/rag-demos/question-answering-gpu/application/helm/templates/deployment.yaml similarity index 100% rename from demos/question-answering/application/helm/templates/deployment.yaml rename to demos/rag-demos/question-answering-gpu/application/helm/templates/deployment.yaml diff --git a/demos/question-answering/application/helm/templates/hpa.yaml b/demos/rag-demos/question-answering-gpu/application/helm/templates/hpa.yaml similarity index 100% rename from demos/question-answering/application/helm/templates/hpa.yaml rename to demos/rag-demos/question-answering-gpu/application/helm/templates/hpa.yaml diff --git 
a/demos/question-answering/application/helm/templates/ingress.yaml b/demos/rag-demos/question-answering-gpu/application/helm/templates/ingress.yaml similarity index 100% rename from demos/question-answering/application/helm/templates/ingress.yaml rename to demos/rag-demos/question-answering-gpu/application/helm/templates/ingress.yaml diff --git a/demos/question-answering/application/helm/templates/service.yaml b/demos/rag-demos/question-answering-gpu/application/helm/templates/service.yaml similarity index 100% rename from demos/question-answering/application/helm/templates/service.yaml rename to demos/rag-demos/question-answering-gpu/application/helm/templates/service.yaml diff --git a/demos/question-answering/application/helm/templates/serviceaccount.yaml b/demos/rag-demos/question-answering-gpu/application/helm/templates/serviceaccount.yaml similarity index 100% rename from demos/question-answering/application/helm/templates/serviceaccount.yaml rename to demos/rag-demos/question-answering-gpu/application/helm/templates/serviceaccount.yaml diff --git a/demos/question-answering/application/helm/templates/virtualService.yaml b/demos/rag-demos/question-answering-gpu/application/helm/templates/virtualService.yaml similarity index 100% rename from demos/question-answering/application/helm/templates/virtualService.yaml rename to demos/rag-demos/question-answering-gpu/application/helm/templates/virtualService.yaml diff --git a/demos/rag-demos/question-answering-gpu/application/helm/values.yaml b/demos/rag-demos/question-answering-gpu/application/helm/values.yaml new file mode 100644 index 00000000..7d47b244 --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/application/helm/values.yaml @@ -0,0 +1,94 @@ +# Default values for secure-qna app. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: dpoulopoulos/qna-app-mlde + pullPolicy: Always + # Overrides the image tag whose default is the chart appVersion. + tag: "v0.1.0" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} +# fsGroup: 2000 + +securityContext: +# capabilities: +# drop: +# - ALL +# readOnlyRootFilesystem: true +# runAsNonRoot: true + runAsUser: 0 + +service: + type: ClusterIP + port: 80 + targetPort: 8080 + +#Platform related options +ezua: + domainName: "${DOMAIN_NAME}" + #Use next options in order to configure the application endpoint. 
+  #An example VirtualService is provided in templates/virtualService.yaml.
+  virtualService:
+    endpoint: "secure-qna.${DOMAIN_NAME}"
+    istioGateway: "istio-system/ezaf-gateway"
+
+  authorizationPolicy:
+    namespace: "istio-system"
+    providerName: "oauth2-proxy"
+    matchLabels:
+      istio: "ingressgateway"
+
+ingress:
+  enabled: false
+  className: ""
+  annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  hosts:
+    - host: chart-example.local
+      paths:
+        - path: /
+          pathType: ImplementationSpecific
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+
+resources:
+  limits:
+    cpu: 100m
+    memory: 128Mi
+  requests:
+    cpu: 100m
+    memory: 128Mi
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 4
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/demos/rag-demos/question-answering-gpu/application/qna-app.tar.gz b/demos/rag-demos/question-answering-gpu/application/qna-app.tar.gz
new file mode 100644
index 00000000..f3dc1f58
Binary files /dev/null and b/demos/rag-demos/question-answering-gpu/application/qna-app.tar.gz differ
diff --git a/demos/question-answering/dockerfiles/app/Dockerfile b/demos/rag-demos/question-answering-gpu/dockerfiles/app/Dockerfile
similarity index 100%
rename from demos/question-answering/dockerfiles/app/Dockerfile
rename to demos/rag-demos/question-answering-gpu/dockerfiles/app/Dockerfile
diff --git a/demos/rag-demos/question-answering-gpu/dockerfiles/app/images/app-header.png b/demos/rag-demos/question-answering-gpu/dockerfiles/app/images/app-header.png
new file mode 100644
index 00000000..1f01f4c5
Binary files /dev/null and b/demos/rag-demos/question-answering-gpu/dockerfiles/app/images/app-header.png differ
diff --git a/demos/rag-demos/question-answering-gpu/dockerfiles/app/images/logo.png b/demos/rag-demos/question-answering-gpu/dockerfiles/app/images/logo.png
new file mode 100644
index 00000000..3434a788
Binary files /dev/null and b/demos/rag-demos/question-answering-gpu/dockerfiles/app/images/logo.png differ
diff --git a/demos/rag-demos/question-answering-gpu/dockerfiles/app/src/app.py b/demos/rag-demos/question-answering-gpu/dockerfiles/app/src/app.py
new file mode 100644
index 00000000..5b1ab6b8
--- /dev/null
+++ b/demos/rag-demos/question-answering-gpu/dockerfiles/app/src/app.py
@@ -0,0 +1,204 @@
+import json
+import logging
+
+import argparse
+import gradio as gr
+import requests
+from theme import EzmeralTheme
+
+
+DOMAIN_NAME = "svc.cluster.local"
+NAMESPACE = open(
+    "/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r").read()
+
+
+HEADER = """
+    <img src="images/app-header.png" alt="ai-enabled-search" />
+""" + + +EXAMPLES = [ + ["What is EzPresto?"], + ["How do I get started with Ezmeral Unified Analytics?"], + ["What are the major deep learning frameworks that Determined is compatible with?"], +] + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +subdomain = None +model_name = None + + +def llm_service( + question, + temperature, + num_docs, + max_tokens, + top_k, + top_p, + context_check, + request: gr.Request, +): + SVC = f"{subdomain}.{NAMESPACE}.{DOMAIN_NAME}" + URL = f"https://{SVC}/v1/models/{model_name}:predict" + + data = { + "input": question, + "max_tokens": int(max_tokens), + "top_k": int(top_k), + "top_p": top_p, + "num_docs": int(num_docs), + "temperature": temperature, + } + + if not context_check: + data = {**data, **{"context": " "}} + + payload = {"instances": [data]} + + headers = {"Authorization": request.headers.get("authorization")} + response = requests.post(URL, json=payload, headers=headers, verify=False) + + text = json.loads(response.text)["outputs"][2]["data"][0] + trimmed_text = ( + text.split("[INST]", 1)[-1].split("[/INST]", 1)[0] + if "[INST]" in text and "[/INST]" in text + else text + ) + result = ( + text.replace(trimmed_text, "") + .replace("[INST]", "") + .replace("[/INST]", "") + .strip() + ) + + return result + + +def main(llm_server_subdomain, llm_name): + global subdomain, model_name + subdomain = llm_server_subdomain + model_name = llm_name + + with gr.Blocks(theme=EzmeralTheme()) as app: + # Application Header + gr.HTML(HEADER) + + # Main Section + with gr.Row(): + question = gr.Textbox(label="Question", autofocus=True) + with gr.Row(): + with gr.Column(): + submit_btn = gr.Button("Submit", variant="primary") + with gr.Column(): + clear_btn = gr.ClearButton(value="Reset", variant="secondary") + + # Advanced Settings + with gr.Accordion("Advanced options", open=False): + with gr.Row(): + with gr.Column(): + temperature = gr.Slider( + label="Temperature", + minimum=0.0, + maximum=1.0, + value=0.2, + info="The model temperature. Larger values increase" + " creativity but decrease factuality.", + ) + max_tokens = gr.Number( + label="Max Tokens", + minimum=10, + maximum=1000, + value=100, + info="The maximum number of tokens to generate.", + ) + num_docs = gr.Number( + label="Number of documents to retrieve", + minimum=1, + maximum=4, + value=4, + info="The maximum number of documents to retrieve" + " from the vector store.", + ) + with gr.Column(): + top_k = gr.Number( + label="Top k", + minimum=5, + maximum=200, + value=40, + info="Randomly sample from the top_k most likely" + " tokens at each generation step. 
Set this to 1" + " for greedy decoding.", + ) + top_p = gr.Slider( + label="Top p", + minimum=0.1, + maximum=1.0, + value=0.4, + info="Randomly sample at each generation step from the" + " top most likely tokens whose probabilities add" + " up to top_p.", + ) + with gr.Row(): + context_check = gr.Checkbox( + value=True, + label="Use knowledge base", + info="Do you want to retrieve and use relevant context" + " from your knowledge database?", + ) + + # Output Section + output = gr.Textbox(label="Answer") + + # Examples Section + gr.Examples(examples=EXAMPLES, inputs=[question]) + + # Event Handlers + submit_btn.click( + fn=llm_service, + inputs=[ + question, + temperature, + num_docs, + max_tokens, + top_k, + top_p, + context_check, + ], + outputs=[output], + ) + + clear_btn.click( + lambda: [None, 0.2, 4, 100, 40, 0.4, True, None], + [], + outputs=[ + question, + temperature, + num_docs, + max_tokens, + top_k, + top_p, + context_check, + output, + ], + ) + + app.launch(server_name="0.0.0.0", server_port=8080) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--llm-server-subdomain", type=str, + default="ensemble-transformer") + parser.add_argument("--llm-name", type=str, default="ensemble") + args = parser.parse_args() + + logger.info("LLM Server Subdomain: %s", args.llm_server_subdomain) + logger.info("Model Name: %s", args.llm_name) + + main(args.llm_server_subdomain, args.llm_name) diff --git a/demos/question-answering/dockerfiles/app/src/requirements.txt b/demos/rag-demos/question-answering-gpu/dockerfiles/app/src/requirements.txt similarity index 100% rename from demos/question-answering/dockerfiles/app/src/requirements.txt rename to demos/rag-demos/question-answering-gpu/dockerfiles/app/src/requirements.txt diff --git a/demos/question-answering/dockerfiles/app/src/theme.py b/demos/rag-demos/question-answering-gpu/dockerfiles/app/src/theme.py similarity index 100% rename from demos/question-answering/dockerfiles/app/src/theme.py rename to demos/rag-demos/question-answering-gpu/dockerfiles/app/src/theme.py diff --git a/demos/rag-demos/question-answering-gpu/dockerfiles/training/Dockerfile b/demos/rag-demos/question-answering-gpu/dockerfiles/training/Dockerfile new file mode 100644 index 00000000..d0b432a7 --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/dockerfiles/training/Dockerfile @@ -0,0 +1,2 @@ +FROM determinedai/environments:cuda-11.3-pytorch-1.12-tf-2.11-gpu-622d512 +RUN pip install llama-index-finetuning==0.1.0 accelerate bitsandbytes diff --git a/demos/question-answering/dockerfiles/transformer/Dockerfile b/demos/rag-demos/question-answering-gpu/dockerfiles/transformer/Dockerfile similarity index 100% rename from demos/question-answering/dockerfiles/transformer/Dockerfile rename to demos/rag-demos/question-answering-gpu/dockerfiles/transformer/Dockerfile diff --git a/demos/rag-demos/question-answering-gpu/dockerfiles/transformer/src/model.py b/demos/rag-demos/question-answering-gpu/dockerfiles/transformer/src/model.py new file mode 100644 index 00000000..0d422b80 --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/dockerfiles/transformer/src/model.py @@ -0,0 +1,223 @@ +import json +import logging +import argparse +import requests + +import httpx + +from kserve import Model, ModelServer, model_server + +PROMPT_TEMPLATE = """ +[INST] +You are an AI assistant. You will be given a task. You must generate a detailed +answer. + +Use the following pieces of context to answer the question at the end. 
+don't know the answer, just say that you don't know, don't try to make up an
+answer.
+
+{context}
+
+{input}
+[/INST]
+"""
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+class Transformer(Model):
+    def __init__(
+        self,
+        name: str,
+        predictor_host: str,
+        protocol: str,
+        use_ssl: bool,
+        vectorstore_endpoint: str,
+        vectorstore_name: str,
+    ):
+        super().__init__(name)
+
+        # KServe specific arguments
+        self.name = name
+        self.predictor_host = predictor_host
+        self.protocol = protocol
+        self.use_ssl = use_ssl
+        self.ready = True
+
+        # Transformer specific arguments
+        self.vectorstore_endpoint = vectorstore_endpoint
+        self.vectorstore_name = vectorstore_name
+
+        self.vectorstore_url = self._build_vectorstore_url()
+
+    def _get_namespace(self):
+        with open(
+            "/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r"
+        ) as f:
+            return f.read()
+
+    def _build_vectorstore_url(self):
+        domain_name = "svc.cluster.local"
+        namespace = self._get_namespace()
+        deployment_name = self.vectorstore_endpoint
+        model_name = self.vectorstore_name
+
+        # Build the vectorstore URL
+        svc = f"{deployment_name}.{namespace}.{domain_name}"
+        url = f"https://{svc}/v1/models/{model_name}:predict"
+        return url
+
+    def _build_request(
+        self,
+        query: str,
+        context: str,
+        max_tokens: int,
+        top_k: int,
+        top_p: float,
+        temperature: float,
+    ):
+        prompt = PROMPT_TEMPLATE.format(context=context, input=query)
+
+        inference_request = {
+            "inputs": [
+                {
+                    "name": "text_input",
+                    "datatype": "BYTES",
+                    "shape": [1, 1],
+                    "data": [f"{prompt}"],
+                },
+                {
+                    "name": "max_tokens",
+                    "datatype": "INT32",
+                    "shape": [1, 1],
+                    "data": [max_tokens],
+                },
+                {
+                    "name": "top_k",
+                    "datatype": "INT32",
+                    "shape": [1, 1],
+                    "data": [top_k],
+                },
+                {
+                    "name": "top_p",
+                    "datatype": "FP32",
+                    "shape": [1, 1],
+                    "data": [top_p],
+                },
+                {
+                    "name": "temperature",
+                    "datatype": "FP32",
+                    "shape": [1, 1],
+                    "data": [temperature],
+                },
+            ]
+        }
+
+        return inference_request
+
+    @property
+    def _http_client(self):
+        headers = {"Authorization": self.authorization}
+        self._http_client_instance = httpx.AsyncClient(
+            headers=headers, verify=False
+        )
+        return self._http_client_instance
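+
+    # NOTE: Illustrative payload accepted by this transformer (example
+    # values, not part of the original file):
+    #   {"instances": [{"input": "What is EzPresto?", "max_tokens": 100,
+    #                   "top_k": 40, "top_p": 0.4, "temperature": 0.2,
+    #                   "num_docs": 2, "context": "<optional>"}]}
+    # Missing fields fall back to the defaults below; unless "context" is
+    # provided, preprocess retrieves one from the vector store first.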
Defaulting to 2") + num_docs = 2 + + context = data.get("context", None) + + if context: + logger.info("Skipping retrieval step...") + return self._build_request( + query, context, max_tokens, top_k, top_p, temperature + ) + else: + payload = {"instances": [{"input": query, "num_docs": num_docs}]} + headers = {"Authorization": self.authorization} + + logger.info( + f"Receiving relevant docs from: {self.vectorstore_url}") + + response = requests.post( + self.vectorstore_url, json=payload, headers=headers, verify=False + ) + response = json.loads(response.text) + + context = "\n".join(response["predictions"]) + + logger.info(f"Received documents: {context}") + + return self._build_request( + query, context, max_tokens, top_k, top_p, temperature + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(parents=[model_server.parser]) + parser.add_argument( + "--predictor_host", help="The URL for the model predict function", + required=True + ) + parser.add_argument( + "--protocol", help="The protocol for the predictor", default="v1" + ) + parser.add_argument( + "--model_name", help="The name that the model is served under." + ) + parser.add_argument( + "--use_ssl", help="Use ssl for connecting to the predictor", + action="store_true" + ) + parser.add_argument( + "--vectorstore_endpoint", + default="vectorstore-predictor", + help="The endpoint of the Vector Store Inference Service", + ) + parser.add_argument( + "--vectorstore_name", + default="vectorstore", + help="The name of the Vector Store Inference Service", + ) + args, _ = parser.parse_known_args() + + logger.info(args) + + model = Transformer( + args.model_name, + args.predictor_host, + args.protocol, + args.use_ssl, + args.vectorstore_endpoint, + args.vectorstore_name, + ) + ModelServer().start([model]) diff --git a/demos/question-answering/dockerfiles/transformer/src/requirements.txt b/demos/rag-demos/question-answering-gpu/dockerfiles/transformer/src/requirements.txt similarity index 100% rename from demos/question-answering/dockerfiles/transformer/src/requirements.txt rename to demos/rag-demos/question-answering-gpu/dockerfiles/transformer/src/requirements.txt diff --git a/demos/rag-demos/question-answering-gpu/dockerfiles/triton-inference-server/Dockerfile b/demos/rag-demos/question-answering-gpu/dockerfiles/triton-inference-server/Dockerfile new file mode 100644 index 00000000..daa39131 --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/dockerfiles/triton-inference-server/Dockerfile @@ -0,0 +1,2 @@ +FROM nvcr.io/nvidia/tritonserver:24.01-py3 +RUN pip install transformers==4.34.0 protobuf==3.20.3 sentencepiece==0.1.99 accelerate==0.23.0 einops==0.6.1 diff --git a/demos/question-answering/dockerfiles/vectorstore/Dockerfile b/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/Dockerfile similarity index 58% rename from demos/question-answering/dockerfiles/vectorstore/Dockerfile rename to demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/Dockerfile index 2c2c61d1..7d04ee4c 100644 --- a/demos/question-answering/dockerfiles/vectorstore/Dockerfile +++ b/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/Dockerfile @@ -1,12 +1,15 @@ -FROM python:3.9.0-slim +FROM python:3.11.5-bookworm RUN apt update && apt install build-essential -y COPY src/requirements.txt src/requirements.txt -RUN pip install --upgrade pip && pip install -r /src/requirements.txt +RUN pip install --upgrade pip +RUN pip install pip install -r /src/requirements.txt +RUN pip install kserve==0.11.2 COPY src/model.py 
 COPY src/utils.py src/utils.py
+COPY src/embeddings.py src/embeddings.py
 
 RUN chmod -R 777 /src
diff --git a/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/embeddings.py b/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/embeddings.py
new file mode 100644
index 00000000..1634d366
--- /dev/null
+++ b/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/embeddings.py
@@ -0,0 +1,58 @@
+import json
+import logging
+import requests
+
+
+logger = logging.getLogger(__name__)
+
+
+class EmbeddingsClient:
+    def __init__(self, model_endpoint: str, model_name: str) -> None:
+        self._url = self._build_url(model_endpoint, model_name)
+
+    @property
+    def authorization(self):
+        return self._authorization
+
+    @authorization.setter
+    def authorization(self, authorization):
+        self._authorization = authorization
+
+    def _get_namespace(self):
+        with open(
+            "/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r"
+        ) as f:
+            return f.read()
+
+    def _build_url(self, model_endpoint, model_name):
+        domain_name = "svc.cluster.local"
+        namespace = self._get_namespace()
+
+        svc = f"{model_endpoint}.{namespace}.{domain_name}"
+        url = f"https://{svc}/v2/models/{model_name}/infer"
+
+        return url
+
+    def embed_query(self, query):
+        headers = {"Authorization": self.authorization}
+
+        logger.info(f"Sending request to {self._url} with query {query}...")
+
+        inference_request = {
+            "inputs": [
+                {
+                    "name": "text_input",
+                    "datatype": "BYTES",
+                    "shape": [1, 1],
+                    "data": [f"{query}"],
+                }
+            ]
+        }
+
+        response = requests.post(
+            self._url, json=inference_request, headers=headers, verify=False
+        )
+
+        logger.info(f"Received response: {response.text}")
+
+        if response.status_code == 200:
+            return json.loads(response.text)["outputs"][0]["data"]
diff --git a/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/model.py b/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/model.py
new file mode 100644
index 00000000..f740f6b2
--- /dev/null
+++ b/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/model.py
@@ -0,0 +1,79 @@
+import logging
+import argparse
+
+import kserve
+
+from langchain.vectorstores import Chroma
+
+from utils import download_directory
+from embeddings import EmbeddingsClient
+
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_NUM_DOCS = 2
+
+
+class VectorStore(kserve.Model):
+    def __init__(
+        self, name: str, persist_uri: str, model_endpoint: str, model_name: str
+    ):
+        super().__init__(name)
+        self.name = name
+        self._prepare_vectorstore(persist_uri, model_endpoint, model_name)
+
+        self.ready = True
+
+    def _prepare_vectorstore(
+        self, uri: str, model_endpoint: str, model_name: str
+    ):
+        self.embeddings = EmbeddingsClient(model_endpoint, model_name)
+        persist_dir = download_directory(uri)
+        self.vectordb = Chroma(
+            persist_directory=persist_dir, embedding_function=self.embeddings
+        )
+
+    def predict(self, request: dict, headers: dict) -> dict:
+        authorization = headers["authorization"]
+        self.embeddings.authorization = authorization
+
+        data = request["instances"][0]
+        query = data["input"]
+        num_docs = data.get("num_docs", DEFAULT_NUM_DOCS)
+
+        logger.info(f"Received question: {query}")
+
+        docs = self.vectordb.similarity_search(query, k=num_docs)
+
+        logger.info(f"Retrieved context: {docs}")
+
+        return {"predictions": [doc.page_content for doc in docs]}
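+
+# NOTE: Example interaction with this service (illustrative values, not part
+# of the original file):
+#   request:  {"instances": [{"input": "What is EzPresto?", "num_docs": 2}]}
+#   response: {"predictions": ["<text of doc 1>", "<text of doc 2>"]}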
"--persist-uri", + type=str, + required=True, + help="The location of the persisted VectorStore.", + ) + parser.add_argument( + "--model-endpoint", + type=str, + default="bge-predictor", + help="The endpoint of the embeddings model inference service.", + ) + parser.add_argument( + "--model-name", + type=str, + default="bge", + help="The name of the embeddings model.", + ) + args = parser.parse_args() + + model = VectorStore( + "vectorstore", args.persist_uri, args.model_endpoint, args.model_name + ) + kserve.ModelServer(workers=1).start([model]) diff --git a/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/requirements.txt b/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/requirements.txt new file mode 100644 index 00000000..d69ab8ce --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/requirements.txt @@ -0,0 +1,3 @@ +boto3==1.34.41 +chromadb==0.4.22 +langchain==0.1.7 diff --git a/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/utils.py b/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/utils.py new file mode 100644 index 00000000..0fe04ed4 --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/dockerfiles/vectorstore/src/utils.py @@ -0,0 +1,46 @@ +import os +import logging +import boto3 +import urllib + + +logger = logging.getLogger(__name__) + +access_key = os.getenv("MINIO_ACCESS_KEY") +secret_key = os.getenv("MINIO_SECRET_KEY") +endpoint_url = os.getenv("MLFLOW_S3_ENDPOINT_URL") + + +def _get_s3_client(): + return boto3.client( + service_name="s3", + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + endpoint_url=endpoint_url, + verify=False) + + +def download_directory(uri: str) -> str: + """Download the directore in the given URI. + + Args: + uri (str): The URI of the directory object. + """ + local_dir = "db" + + client = _get_s3_client() + + parsed_uri = urllib.parse.urlparse(uri) + bucket_name = parsed_uri.netloc + s3_folder = parsed_uri.path.lstrip('/') + + paginator = client.get_paginator('list_objects_v2') + for page in paginator.paginate(Bucket=bucket_name, Prefix=s3_folder): + for obj in page.get('Contents', []): + local_file_path = os.path.join(local_dir, obj['Key'][len(s3_folder):].lstrip('/')) + if not os.path.exists(os.path.dirname(local_file_path)): + os.makedirs(os.path.dirname(local_file_path)) + client.download_file(bucket_name, obj['Key'], local_file_path) + print(f"Downloaded {obj['Key']} to {local_file_path}") + + return "db" diff --git a/demos/rag-demos/question-answering-gpu/documents/EzDF.json b/demos/rag-demos/question-answering-gpu/documents/EzDF.json new file mode 100644 index 00000000..b1acc6e1 --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/documents/EzDF.json @@ -0,0 +1,1457 @@ +[ + { + "content": "\nGet Started Jump to main content Get Started Platform Administration Reference Home Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . HPE Ezmeral Data Fabric 7.6 Documentation Search current doc version Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. Installation This section contains information about installing the HPE Ezmeral Data Fabric as-a-service platform. 
Service Activation and Billing Describes how to activate and register a new fabric to take advantage of automated billing. SSO Using Keycloak Describes how single sign-on (SSO) is implemented by using Keycloak. Setting Up Clients Summarizes the steps for enabling client communication with the HPE Ezmeral Data Fabric . Upgrade This section contains information that describes how to upgrade the HPE Ezmeral Data Fabric as-a-service platform. User Assistance Describes how to access different resources that can help you learn how to use the HPE Ezmeral Data Fabric . Get Started This section describes how you can get started learning about, installing, and using\n the HPE Ezmeral Data Fabric . HPE Ezmeral Data Fabric 7.6.0 Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. Installation This section contains information about installing the HPE Ezmeral Data Fabric as-a-service platform. Service Activation and Billing Describes how to activate and register a new fabric to take advantage of automated billing. SSO Using Keycloak Describes how single sign-on (SSO) is implemented by using Keycloak. Setting Up Clients Summarizes the steps for enabling client communication with the HPE Ezmeral Data Fabric . Upgrade This section contains information that describes how to upgrade the HPE Ezmeral Data Fabric as-a-service platform. User Assistance Describes how to access different resources that can help you learn how to use the HPE Ezmeral Data Fabric . (Topic last modified: 2023-04-23) \u00a9Copyright 2024 Hewlett Packard Enterprise Development LP - Partners | Support | Dev-Hub | Community | Training | ALA | Privacy Policy | Glossary", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/get_started/get_started_main.html", + "title": "Get Started" + }, + { + "content": "\nHPE Ezmeral Data Fabric 7.6.0 Release Notes Jump to main content Get Started Platform Administration Reference Home Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. HPE Ezmeral Data Fabric 7.6 Documentation Search current doc version Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. What's New in Release 7.6.0 HPE Ezmeral Data Fabric 7.6.0 is a new data-storage platform from Hewlett Packard Enterprise that offers a multimodal, subscription-based, hybrid-cloud experience for the enterprise. Known Issues (Release 7.6.0) Describes known issues you might encounter in release 7.6.0. This list is current as of the release date. Installation This section contains information about installing the HPE Ezmeral Data Fabric as-a-service platform. Service Activation and Billing Describes how to activate and register a new fabric to take advantage of automated billing. SSO Using Keycloak Describes how single sign-on (SSO) is implemented by using Keycloak. Setting Up Clients Summarizes the steps for enabling client communication with the HPE Ezmeral Data Fabric . Upgrade This section contains information that describes how to upgrade the HPE Ezmeral Data Fabric as-a-service platform. 
User Assistance Describes how to access different resources that can help you learn how to use the HPE Ezmeral Data Fabric . HPE Ezmeral Data Fabric 7.6.0 Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. What's New in HPE Ezmeral Data Fabric 7.6.0 HPE Ezmeral Data Fabric 7.6.0 is a new data-storage platform from Hewlett Packard Enterprise that offers a multimodal, subscription-based, hybrid-cloud experience for the enterprise. Known Issues (Release 7.6.0) Describes known issues you might encounter in release 7.6.0. This list is current as of the release date. \u00a9Copyright 2024 Hewlett Packard Enterprise Development LP - Partners | Support | Dev-Hub | Community | Training | ALA | Privacy Policy | Glossary", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/get_started/relnotes_intro.html", + "title": "HPE Ezmeral Data Fabric 7.6.0 Release Notes" + }, + { + "content": "\nWhat's New in HPE Ezmeral Data Fabric 7.6.0 Jump to main content Get Started Platform Administration Reference Home Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. What's New in Release 7.6.0 HPE Ezmeral Data Fabric 7.6.0 is a new data-storage platform from Hewlett Packard Enterprise that offers a multimodal, subscription-based, hybrid-cloud experience for the enterprise. HPE Ezmeral Data Fabric 7.6 Documentation Search current doc version Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. What's New in Release 7.6.0 HPE Ezmeral Data Fabric 7.6.0 is a new data-storage platform from Hewlett Packard Enterprise that offers a multimodal, subscription-based, hybrid-cloud experience for the enterprise. Known Issues (Release 7.6.0) Describes known issues you might encounter in release 7.6.0. This list is current as of the release date. Installation This section contains information about installing the HPE Ezmeral Data Fabric as-a-service platform. Service Activation and Billing Describes how to activate and register a new fabric to take advantage of automated billing. SSO Using Keycloak Describes how single sign-on (SSO) is implemented by using Keycloak. Setting Up Clients Summarizes the steps for enabling client communication with the HPE Ezmeral Data Fabric . Upgrade This section contains information that describes how to upgrade the HPE Ezmeral Data Fabric as-a-service platform. User Assistance Describes how to access different resources that can help you learn how to use the HPE Ezmeral Data Fabric . What's New in HPE Ezmeral Data Fabric 7.6.0 HPE Ezmeral Data Fabric 7.6.0 is a new data-storage platform from\n Hewlett Packard Enterprise that offers a multimodal, subscription-based, hybrid-cloud experience\n for the enterprise. New Features for Release 7.6.0 Following are some new features and capabilities that distinguish release 7.6.0 from the\n previous release (7.5.0): New Feature or Capability Supported on See for more information . . . DF SaaS? Customer Managed? 
Support for Spark to query Iceberg tables stored in the Data Fabric Yes Yes Iceberg Support and Getting Started with Iceberg Unified management of security policies for files Yes Yes* Administering Security Policies Support for attaching multiple security policies to one or more\n volumes Yes Yes Assigning Multiple Security Policies to One or More Volumes Binary table replication Yes Yes* Managing Table Replication Global namespace support for NFS- and S3-protocol operations performed\n against WEKA, VAST Data, and Scality Yes Yes Third-Party Storage Solutions IPv6 support Yes Yes IPv6 Support in Data Fabric Data Fabric UI enhancement: Exposing object\n endpoints for Data Fabric volumes Yes Yes* Viewing Object Endpoint Info to Remotely Access Files as Objects Data Fabric UI enhancement: Support for downloading\n endpoint information for selected volumes Yes Yes* Downloading Volume Endpoint Information Data Fabric UI enhancement: Support for\n reinitiating fabric creation following an error Yes Yes* Steps for Creating a Fabric and Troubleshooting Upgrade Issues Data Fabric UI enhancement: Searchable, full-page\n listing of Keycloak users with access to the Data Fabric UI Yes Yes* Viewing a List of Users External S3 support with generic vendor details Yes Yes Importing an External S3 Object Store and clustergroup addexternal *Requires using the Data Fabric UI . With release 7.3.0 and later,\n you can use the Data Fabric UI on customer-managed clusters. To\n understand the limitations and benefits of doing so, see Data Fabric UI . Welcome to the New HPE Ezmeral Data Fabric Release 7.6.0 introduces a new HPE Ezmeral Data Fabric that is: Managed as a service Subscription-based Designed to make data accessible The single logical view integrates files, objects, and streaming data and features\n consumption-based pricing. A global namespace enables\n deployments to join a single fabric no matter where the data is located. Managed As a Service With the new HPE Ezmeral Data Fabric platform, HPE manages the\n configuration, upgrade, and lifecycle of the platform \u2013 as long as the platform is\n connected. Deploy to the Cloud or On-Premises Release 7.6.0 of the HPE Ezmeral Data Fabric provides a\n high-performance file system for files, objects, and streaming files that can be deployed\n quickly on: Amazon Web Services (AWS) Microsoft Azure Google Cloud Platform (GCP) Your on-premises infrastructure The Data Fabric simplifies how you manage\n petabyte-scale data, enabling on-premises files to be transferred as native S3 objects to\n the cloud, where upstream apps can transform the data. Results can be stored either\n on-premises or on the cloud as objects. HPE Ezmeral Data Fabric makes\n it easy to migrate workloads across clouds or on-premise installations without needing\n format changes. Storage Tiers and Consumption With the new HPE Ezmeral Data Fabric , the amount of storage you use\n determines your monthly charges for the service. HPE offers licenses for the following storage tiers : 1 TB 10 TB 100 TB 1 PB The storage tier is a baseline for consumption. You select a storage tier when you\n purchase a license and confirm the tier when you create a fabric. You can consume more\n storage than your specified storage tier, but rates are higher when your consumption exceeds\n the specified tier. For current rate information, contact your HPE sales representative. The Data Fabric UI enables you to monitor your consumption and provides\n estimates of your monthly charges. 
Comparing the Data Fabric Platforms The new as-a-service HPE Ezmeral Data Fabric leverages the strengths\n of its predecessor, the HPE Ezmeral Data Fabric \u2013 Customer Managed platform. The\n as-a-service platform also improves on its predecessor in many ways. The following table\n compares the platforms: Feature HPE Ezmeral Data Fabric HPE Ezmeral Data Fabric \u2013 Customer Managed Distributed File System Yes Yes Global Namespace (GNS) Yes No Object Support Yes Yes Event Stream Support Yes Yes NFSv4 Support* Yes Yes Container Storage Interface (CSI) Support Yes Yes Database Support Yes Yes Client Support Yes Yes Single Sign-On (SSO) Support Yes Yes (release 7.3.0 and later) Platform Management Managed by HPE User managed Billing and Licensing Consumption-only and some form of term (term-only not supported) Consumption and Term Air-Gap Support Yes Yes Graphical User Interface Data Fabric UI or Control System Data Fabric UI or Control\n System maprcli Command Line Yes Yes Scale (number of nodes per fabric / cluster) See note** Thousands of nodes EEP (HPE Ezmeral Ecosystem Pack) No Yes OpenTelemetry (OTel) Yes Yes *NFSv3 is not supported. **For cloud deployments, the nodes and instances are predetermined based on the storage\n tier that you select during installation. For on-premises deployments, you determine the\n number of nodes at create time. While new fabrics can be added at any time, adding nodes\n after fabric creation is not currently supported. Terminology Some terms used to describe the new HPE Ezmeral Data Fabric are\n different from terms used to describe the HPE Ezmeral Data Fabric \u2013 Customer Managed platform. The following table highlights some terminology differences: Term Definition Comparable HPE Ezmeral Data Fabric \u2013 Customer Managed Term Data Fabric UI The new user interface for managing the HPE Ezmeral Data Fabric . See Data Fabric UI . Control System fabric A software-defined storage system that provides multi-modal access to data.\n Fabrics help you manage your data, making it possible to access, integrate, model,\n analyze, and provision your data seamlessly. Multiple fabrics share a common\n global namespace. cluster global namespace A data plane that spans all of your Data Fabric deployments and includes all of the as-a-service fabrics in your enterprise.\n See Global Namespace (GNS) . global namespace HPE Ezmeral Data Fabric Now refers to the consumption-based, as-a-service Data Fabric platform. Data Fabric deployments that are not consumption based\n and not provided as-a-service are now referred to as \"customer managed.\" HPE Ezmeral Data Fabric \u2013 Customer Managed (Topic last modified: 2024-01-31) On this page New Features for Release 7.6.0 Welcome to the New HPE Ezmeral Data Fabric Managed As a Service Deploy to the Cloud or On-Premises Storage Tiers and Consumption Comparing the Data Fabric Platforms Terminology \u00a9Copyright 2024 Hewlett Packard Enterprise Development LP - Partners | Support | Dev-Hub | Community | Training | ALA | Privacy Policy | Glossary", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/get_started/whats_new.html", + "title": "What's New in HPE Ezmeral Data Fabric 7.6.0" + }, + { + "content": "\nKnown Issues (Release 7.6.0) Jump to main content Get Started Platform Administration Reference Home Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . 
Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. Known Issues (Release 7.6.0) Describes known issues you might encounter in release 7.6.0. This list is current as of the release date. HPE Ezmeral Data Fabric 7.6 Documentation Search current doc version Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. What's New in Release 7.6.0 HPE Ezmeral Data Fabric 7.6.0 is a new data-storage platform from Hewlett Packard Enterprise that offers a multimodal, subscription-based, hybrid-cloud experience for the enterprise. Known Issues (Release 7.6.0) Describes known issues you might encounter in release 7.6.0. This list is current as of the release date. Installation This section contains information about installing the HPE Ezmeral Data Fabric as-a-service platform. Service Activation and Billing Describes how to activate and register a new fabric to take advantage of automated billing. SSO Using Keycloak Describes how single sign-on (SSO) is implemented by using Keycloak. Setting Up Clients Summarizes the steps for enabling client communication with the HPE Ezmeral Data Fabric . Upgrade This section contains information that describes how to upgrade the HPE Ezmeral Data Fabric as-a-service platform. User Assistance Describes how to access different resources that can help you learn how to use the HPE Ezmeral Data Fabric . Known Issues (Release 7.6.0) Describes known issues you might encounter in release 7.6.0. This list is current as of\n the release date. Where available, the workaround for an issue is also documented. Hewlett Packard Enterprise\n regularly releases maintenance releases and patches to fix issues. We recommend checking the\n release notes for any subsequent maintenance releases to see if one or more of these issues\n are fixed. Client Libraries MFS-18249 When you refresh the JWT access token and refresh token for a FUSE-based POSIX\n client manually, the FUSE-based POSIX client does not list the ticket, and remains\n in an inactive and dead state. Workaround: None. MFS-18258 When you add a new cluster to a cluster group, the FUSE-based POSIX client and the\n loopbacknfs POSIX client take about five mintues to load or list the newly added\n cluster. Workaround: None. Data Fabric UI Sign-in Issues DFUI-160 If you sign in to the Data Fabric UI as an SSO user but\n you do not have fabric-level login permission, a sign-in page for the \"Managed Control\n System\" (MCS) is displayed. The \"Managed Control System\" sign-in is not usable for the\n consumption-based HPE Ezmeral Data Fabric . Workaround: Use one of the following workarounds: Edit the MCS URL, and retry logging in. For example, change the boldface\n characters in the following\n URL: https;//:8443/app/ mcs/#/app/overview To\n this: https;//:8443/app/ dfui Try signing in as a user who has fabric-level login permission. Dismiss the MCS page, clear your browser cache, and retry signing in. DFUI-437 If you sign in to the Data Fabric UI as a non-SSO user and\n then sign out and try to sign in as an SSO user, a sign-in page for the \"Managed\n Control System\" (MCS) is displayed. The \"Managed Control System\" sign-in is not usable\n for the consumption-based HPE Ezmeral Data Fabric . 
Workaround: Use one of the following workarounds: Edit the MCS URL, and retry logging in. For example, change the boldface\n characters in the following\n URL: https;//:8443/app/ mcs/#/app/overview To\n this: https;//:8443/app/ dfui Dismiss the \"Managed Control System\" sign-in screen, and retry signing in as a\n non-SSO user. Dismiss the MCS page, clear your browser cache, and retry signing in. DFUI-811 If you launch the Data Fabric UI and then sign out and\n wait for 5-10 minutes and then attempt to sign in, a sign-in page for the \"Managed\n Control System\" (MCS) is displayed. Workaround: See the workaround for DFUI-437. DFUI-826 In a cloud fabric, an empty page is displayed after a session expires and you\n subsequently click on a fabric name. The browser can display the following\n URL: https://:8443/oath/login Workaround: None. DFUI-874 Sometimes when you attempt to sign in to the Data Fabric UI , the \"Managed Control System\" (MCS) is displayed, or the Object Store UI is\n displayed. Workaround: See the workaround for DFUI-437. DFUI-897 A user with no assigned role cannot sign in to the Data Fabric UI . Workaround: Using your SSO provider software, assign a role to the user, and\n retry the sign-in operation. DFUI-902 Incorrect resource data is displayed when an LDAP user signs in to the Data Fabric UI without any SSO roles. Workaround: See the workaround for DFUI-897 DFUI-1123 Attempting to sign in to the Data Fabric UI as a group\n results in a login error message in the browser. For\n example: https://:8443/login?error Workaround: None. DFUI-1135 The Data Fabric UI does not allow an SSO user to log in\n after an unsuccessful login attempt. Workaround: None. Mirroring Issues DFUI-1227 If you create a mirror volume with a security policy, an error is generated when you\n try to remove the security policy. Workaround: None. DFUI-1229 Data aces on a mirror volume cannot be edited. Workaround: None. Display Issues DFUI-1186 After you complete the SSO setup for a new fabric, fabric resources such as volumes\n and mirrors are not immediately displayed in the Data Fabric UI . Workaround: Wait at least 20 minutes or more for the Data Fabric UI to display the fabric details. DFUI-1221 If a fabric includes a large number of resources, loading the resources to display in\n the Resources card on the home page can take a long time. Workaround: None. MFS-18188 When you create a column in a table, the column name is prepended with \"v.\" in the Column Permissions tab. When you create a new column, an\n additional \"v.\" is prepended to the names of existing columns. Workaround: None. DFUI-2127 When you set the access control of a table to Public , the Column families and Replication tabs do\n not display the create or edit options for your tables and table replicas. Workaround: None. DFUI-1793 When you set the access control of a column or column family to Public , the Name field is enabled. Workaround: None. DFUI-2102 When you create a table replica on a primary cluster with the source table on a\n secondary cluster, the replication operation times out. However, the table replica is\n successfully created on the primary cluster. The table replica appears in the Replication tab, but does not appear in the Data Fabric UI Graph or Table view for the primary\n cluster. This behavior is the same for both a source table on the primary cluster and\n the replica on the secondary cluster. Workaround: None. 
DFUI-2099 When you delete a table replica from the Data Fabric UI Home page, the table replica remains listed in the Replication tab. When you select the table on the Replication tab, a message returns stating that the requested\n file does not exist. Workaround: None. DFUI-2128/DFUI-2135 For external S3 buckets, the size of individual buckets is displayed as zero on the\n Resources card, even when the individual buckets contain one or more objects. Workaround: None DFUI-2136 The Overview tab for an external S3 bucket does not display any bucket-related\n information. Workaround: None DFUI-2095 Security policy created on global policy master immediately before creating a volume\n on secondary fabric is not available to assign to the volume, while creating the\n volume. Workaround: Wait for about 15 minutes and then create the volume to\n which you wish to assign the new security policy. DFUI-2161 When a node has multiple IP addresses (for example, the node has both IPv4 and IPv6\n addresses associated with it), the IP addresses are missing a comma or a space, or any\n other separator between the individual IP addresses, on node access point modal. Workaround: None. This issue should not affect the fabric functionality. DFUI-2175 The Client library side drawer displays the same export environment variable for both the access token and refresh\n token. The expected values are as\n follows: export MAPR_JWT_TOKEN_LOCATION=\"/root/jwt_access\" export MAPR_REFRESH_TOKEN_LOCATION=\"/root/jwt_refresh\" However,\n the Client library side drawer displays the\n following: export MAPR_JWT_TOKEN_LOCATION=\"/root/jwt_access\" export MAPR_JWT_TOKEN_LOCATION = \"/root/jwt_refresh\" Workaround: None. External S3 DFUI-2157 Editing buckets on external S3 servers is not supported. Workaround: None. Installation DFUI-565, EZINDFAAS-169 Installation or fabric creation can fail if a proxy is used for internet traffic\n with the HPE Ezmeral Data Fabric . Workaround: Export the following proxy settings, and retry the\n operation: # cat /etc/environment\nexport http_proxy=http://:\nexport https_proxy=http://:\nexport HTTP_PROXY=http://:\nexport HTTPS_PROXY=http://: DFUI-1224 Under Administration , in the Fabrics card, the Data Fabric UI can show the progress of\n installation as 100% and the status as Inactive before certain\n post-installation activities are complete. Workaround: None. This issue should not affect the fabric functionality. Object Store MFS-17233 On cloud (AWS, Azure, or GCP) fabrics, if an instance is rebooted, the public IP\n addresses can change. If this happens, the MOSS certificates must be regenerated to\n include the new IP addresses, and the changes must be propagated to all fabric\n nodes. Workaround: To regenerate the MOSS certificates: Identify the new external IP address for each cloud instance. On each cloud instance: Log on as a sudo user. Update the certificate using the following manageSSLKeys.sh command: /opt/mapr/server/manageSSLKeys.sh createusercert -u moss -ug mapr:mapr -k -p -ips \"\" -a moss -w Restart the MOSS\n service: maprcli node services -nodes 'hostname -f' -name s3server -action restart -json NOTE: You can obtain the ssl_keystore_password and ssl_truststore_password from the node where the configure.sh -secure -genkeys command was issued. In the /opt/mapr/conf/store-passwords.txt file, the passwords are\n listed under keys as ssl.server.keystore.keypassword and ssl.server.truststore.password . 
Use the following commands\n to ensure correct file\n ownership: chown mapr:mapr /opt/mapr/conf/ssl_usertruststore.p12\nchmod 0444 /opt/mapr/conf/ssl_usertruststore.p12\"\nchown mapr:mapr /opt/mapr/conf/ssl_userkeystore.p12\nchmod 0400 /opt/mapr/conf/ssl_userkeystore.p12\" DFUI-519 An SSO user is unable to create buckets on the Data Fabric UI and the Object Store. This is applicable to an SSO\n user with any role such as infrastructure administrator, fabric manager or developer. Workaround: Create an IAM policy with all permissions in the user account.\n This has to be done via minIO client or the Object Store UI. Assign the IAM policy to\n the SSO user. Login to the Data Fabric UI and create a\n bucket/view bucket. DFUI-577 Downloading a large file (1 GB or larger) can fail with the following\n error: Unable to download file \"\": Request failed with status code 500 Workaround: None. MFS-18250 The S3 server crashes when you copy a jumbo object (object size>256 MB) from one\n bucket to another bucket across fabrics using aws s3 cli. Workaround: Set the 'max_concurrent_requests' parameter value to 1 on the AWS\n configuration file. Online Help DFUI-459 If a proxy is used for internet traffic with the HPE Ezmeral Data Fabric , online help screens can time out or fail to\n fetch help content. Workaround: Add the following proxy servers to the /opt/mapr/apiserver/conf/properties.cfg file: http.proxy=: https.proxy=: Security Policies MFS-18154 A security policy created on a cloud-based primary fabric (such as AWS) is not\n replicated on to a secondary fabric created on another cloud provider (such as GCP). Workaround: None. Tables DFUI-1793 While setting column family permissions and column permission from DF UI, a user\n cannot set the type as Public, Workaround: Use the relevant maprcli command for table, to set ACE type as\n public for the column family. Topics DFUI-637 Non-LDAP SSO user authenticating to Keycloak cannot create topic on the Data Fabric UI . Workaround: None. DFUI-639 A non-LDAP SSO user authenticating to Keycloak cannot create a volume or stream\n using the Data Fabric UI . Workaround: None. Non-LDAP and SSO local users are not currently\n supported. Upgrade DFUI-2163 SSO authentication is not enabled for Data Fabric UI, after upgrading from HPE\n Ezmeral Data Fabric release version 7.5 to release version 7.6. Workaround: Restart the API server after upgrade. MFS-18222 Suppose you use the seed node deployment steps to create a fabric\n ( f1 ), and then you add a fabric ( f2 ). If you then\n upgrade f1 successfully as fabric manager, and then you sign in to f1 and initiate an upgrade of f2 , the upgrade of f2 might fail because of a state mismatch. Workaround: In the /deployments folder of f2 , find the upgrade.json file, and copy the file\n to the /deployments folder of f1 . Then update the\n state on f1 using the following\n command: maprcli installer upgradeinfo add -data -clustername -json Volumes DFUI-638 Non-LDAP SSO user authenticating to Keycloak cannot create volume on the Data Fabric UI . Workaround: Create a volume via the Data\n Fabric minIO client. 
(Topic last modified: 2024-02-01) On this page Client Libraries Data Fabric UI External S3 Installation Object Store Online Help Security Policies Tables Topics Upgrade Volumes \u00a9Copyright 2024 Hewlett Packard Enterprise Development LP - Partners | Support | Dev-Hub | Community | Training | ALA | Privacy Policy | Glossary", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/get_started/known_issues.html", + "title": "Known Issues (Release 7.6.0)" + }, + { + "content": "\nInstallation Jump to main content Get Started Platform Administration Reference Home Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . Installation This section contains information about installing the HPE Ezmeral Data Fabric as-a-service platform. HPE Ezmeral Data Fabric 7.6 Documentation Search current doc version Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. Installation This section contains information about installing the HPE Ezmeral Data Fabric as-a-service platform. Fabric Deployment Using a Seed Node Describes how to install the platform using a seed node and the Create Fabric interface. Service Activation and Billing Describes how to activate and register a new fabric to take advantage of automated billing. SSO Using Keycloak Describes how single sign-on (SSO) is implemented by using Keycloak. Setting Up Clients Summarizes the steps for enabling client communication with the HPE Ezmeral Data Fabric . Upgrade This section contains information that describes how to upgrade the HPE Ezmeral Data Fabric as-a-service platform. User Assistance Describes how to access different resources that can help you learn how to use the HPE Ezmeral Data Fabric . Installation This section contains information about installing the HPE Ezmeral Data Fabric as-a-service platform. IMPORTANT: To install the HPE Ezmeral Data Fabric \u2013 Customer Managed platform,\n see this\n website . Fabric Deployment Using a Seed Node Describes how to install the platform using a seed node and the Create Fabric interface. (Topic last modified: 2023-04-19) \u00a9Copyright 2024 Hewlett Packard Enterprise Development LP - Partners | Support | Dev-Hub | Community | Training | ALA | Privacy Policy | Glossary", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/installation_main.html", + "title": "Installation" + }, + { + "content": "\nFabric Deployment Using a Seed Node Jump to main content Get Started Platform Administration Reference Home Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . Installation This section contains information about installing the HPE Ezmeral Data Fabric as-a-service platform. Fabric Deployment Using a Seed Node Describes how to install the platform using a seed node and the Create Fabric interface. HPE Ezmeral Data Fabric 7.6 Documentation Search current doc version Get Started This section describes how you can get started learning about, installing, and using the HPE Ezmeral Data Fabric . Release Notes These notes contain information about release 7.6.0 of the HPE Ezmeral Data Fabric as-a-service platform. Installation This section contains information about installing the HPE Ezmeral Data Fabric as-a-service platform. 
Fabric Deployment Using a Seed Node Describes how to install the platform using a seed node and the Create Fabric interface. Prerequisites for On-Premises Installation Describes fabric node and user prerequisites for on-premises installation of the HPE Ezmeral Data Fabric . AWS Fabric Configuration Parameters This page describes the configuration values that you need to specify to create a new fabric using Amazon Web Services (AWS). Azure Fabric Configuration Parameters This page describes the configuration values that you need to specify to create a new fabric using Microsoft Azure. GCP Fabric Configuration Parameters This page describes the configuration values that you need to specify to create a new fabric using Google Cloud Platform (GCP). On-Premises Fabric Configuration Parameters This page describes the configuration values that you need to specify to create a new fabric that is hosted on-site. Creating a Local Repository for an Air-Gapped Installation Describes how to make installation packages available through a local repository for an air-gapped installation. Troubleshooting Seed Node Installation Describes some common issues that can interfere with seed node installation. Planning Worksheet for Cloud Deployments Print this worksheet, and use it to record configuration information for your cloud deployment. Help for datafabric_container_setup.sh From the Docker command line, you can access the help text for the datafabric_container_setup.sh script. Service Activation and Billing Describes how to activate and register a new fabric to take advantage of automated billing. SSO Using Keycloak Describes how single sign-on (SSO) is implemented by using Keycloak. Setting Up Clients Summarizes the steps for enabling client communication with the HPE Ezmeral Data Fabric . Upgrade This section contains information that describes how to upgrade the HPE Ezmeral Data Fabric as-a-service platform. User Assistance Describes how to access different resources that can help you learn how to use the HPE Ezmeral Data Fabric . Fabric Deployment Using a Seed Node Describes how to install the platform using a seed node and the Create Fabric\n interface. The first fabric you deploy \u2013 whether it is in the cloud or on-premises \u2013 must be deployed\n using a seed node. The seed node creates a lightweight, temporary fabric. The installation\n sequence uses this temporary fabric to display the interface for fabric creation. The seed node is needed only for creation of the first fabric. Any additional fabric must be\n created from your cloud-based or on-premises fabric. The seed node can only be used to create\n fabrics. Creating volumes, buckets, or other resources from the seed node is not supported.\n For more information about creating fabrics, see Creating a Fabric . Seed Node Deployment Process To deploy the HPE Ezmeral Data Fabric in your environment, you run a\n script that starts a Docker container. The Docker container emulates the behavior of a Data Fabric node. This emulated node is the seed node. The\n seed node enables access to the Create Fabric interface used to\n create fabrics. Once the seed node is created and started, the system prints a URL that you use to access\n the Create Fabric interface. The interface enables you to complete\n the steps required to deploy a fabric in the cloud or on-premises. 
After the fabric is\n created, you can spawn additional fabrics from any installed fabric: Prerequisites for Fabric Deployment Before\n you deploy a fabric, review all of the following requirements: Seed Node\n Prerequisites Verify that the node you plan to use as the seed node meets the\n following prerequisites: Prerequisite Notes Users To install using a seed node, you must be root or a user\n that can run sudo commands without being prompted for a password. OS The seed node has been tested on the following operating systems, but it can\n work on other operating systems and in other environments where Docker containers\n are hosted: Mac OS 10 Ubuntu 20.04 Connectivity The seed node that you use to host the Docker container can be a server or\n laptop that supports the following, but must have connectivity to all the subnets\n and the VPN for the cloud provider: Docker * Bash SSH *On Ubuntu, RHEL, and Fedora hosts where Docker is not\n present, Docker is installed automatically. Installation of Docker can take 5-7\n minutes. On Mac hosts where Docker is not present, you must install Docker\n before proceeding with the seed-node deployment. Memory On the seed node, enough memory must be allocated to Docker to enable the\n container to come up and run. Docker must be installed and have the following\n memory allocated to it: Mac: At least 32 GB Other platforms: At least 24 GB This is a general recommendation. Sometimes the container can run with less\n memory. On a seed node with a large number containers competing for memory, this\n recommendation might not be sufficient. Disk Space If you use a Linux server for the seed node, allocate at least 50 GB of disk\n space to run the container. Proxy If the seed node is behind a proxy, update the proxy configuration in /etc/environment using the following commands. You must do this\n before attempting to create a\n fabric: export http_proxy=\nexport https_proxy=\nexport HTTP_PROXY=\nexport HTTPS_PROXY= To specify the proxy\n information in a file when you run the setup script, see Help for datafabric_container_setup.sh . On-Premises Deployment Prerequisites See Prerequisites for On-Premises Installation . Cloud Provider Prerequisites You must have\n sufficient permissions to perform tasks in the cloud environment you are using. HPE recommends\n the following minimum permissions for installers: Cloud Provider Minimum Permissions AWS AmazonEBSCSIDriverPolicy and AmazonEC2FullAccess Azure Contributor role GCP Editor role Before starting the deployment, gather the information that you will need to fill\n out the fabric-creation form for your cloud service provider. You can record the necessary\n information in the Planning Worksheet for Cloud Deployments . Docker Image\n Prerequisites The Docker image that must be used for HPE Ezmeral Data Fabric deployments is: maprtech/edf-seed-container:7.6.0_9.2.1_edf Users\n can also specify the maprtech/edf-seed-container:latest tag, which is\n always the latest version of the image. IMPORTANT: Only an image that has the _edf suffix in the image tag can be used to\n install the consumption-based, software-as-a-service platform. The Docker image name includes the Data Fabric core and\n ecosystem pack version information: You can download the image from the Docker hub\n at: https://hub.docker.com/r/maprtech/edf-seed-container/tags/ Or\n you can use the optional docker pull command as described in the following\n steps. 
After you verify that your system and environment meet the listed prerequisites,\n complete the following steps to deploy the HPE Ezmeral Data Fabric . Run the Script to Bring up the Container Image on the Seed Node Use the following steps to bring up the container image and launch the Create\n Fabric interface: Sign in to the seed node host as root or a user that can run sudo commands without being prompted for a password. Download the datafabric_container_setup.sh script from GitHub. For example, download the script in its raw form by using the\n following wget command: wget https://raw.githubusercontent.com/mapr-demos/edf-seednode-760-getting-started/main/datafabric_container_setup.sh Optional: Use a docker pull command to pre-download a copy of the image from https://hub.docker.com/r/maprtech/edf-seed-container/tags into the same directory as the script. Using docker\n pull requires sudo privileges: docker pull maprtech/edf-seed-container:latest IMPORTANT: Note these considerations: Pre-downloading is optional, but it makes the script run faster and prevents\n download issues when you run the script. The script checks to see if the image is\n already present on your system. If the image is present, the script uses the\n image. If it is not present, the script tries to download it. If you want to use the -i option, you must\n specify: maprtech/edf-seed-container:7.6.0_9.2.1_edf Modify the script so it is\n executable: chmod +x datafabric_container_setup.sh Before running the script, review the following considerations: Running the datafabric_container_setup.sh script requires sudo\n privileges. The script can take 5-10 minutes to run the first time you run it. Unless you\n pre-downloaded the image, the script downloads the latest Docker image from the\n Docker repository. For a list of available tags, see https://hub.docker.com/r/maprtech/edf-seed-container/tags . The script supports the -i option for specifying an image other\n than the latest image. The script also supports a -p option that\n must be used if the seed node is a cloud instance. To view the command line help for\n the script, see Help for datafabric_container_setup.sh . Run the script to deploy the container for the Data Fabric image: ./datafabric_container_setup.sh The script downloads the\n latest image. When the Docker image is running, you see the following\n output: ./datafabric_container_setup.sh\n RAM NEEDED : AVAILABLE\n DOCKER STATUS : RUNNING\n PORTS NEEDED : AVAILABLE\n PROCEEDING FORWARD WITH DEPLOYING SEED NODE\nPlease enter the local sudo password for root\n\nlatest: Pulling from maprtech/edf-seed-container\nDigest: sha256:b863f487de7eaa809b66f923aaec297ae1cab4fdb9441950fbe5c68328235a7a\nStatus: Downloaded newer image for maprtech/edf-seed-container:latest\ndocker.io/maprtech/edf-seed-container:latest\nDeveloper Sandbox Container 8935a11fb7d6 is running..\nservices required for Ezmeral Data fabric are coming up\nservices required for Ezmeral Data fabric are coming up\nservices required for Ezmeral Data fabric are coming up\n\nClient has been configured with the docker container.\n\nPlease click on the link https://:8443/app/dfui to deploy data fabric\nFor user documentation, see https://docs.ezmeral.hpe.com/datafabric/home/installation/installation_main.html If\n the services do not come up, see Troubleshooting Seed Node Installation . Navigate to the link specified in the Docker output message in\n step 6. The Data Fabric UI displays the Create your first fabric form. 
Create the Fabric Bringing up the seed node automatically displays the fabric-creation interface. To create\n your first permanent fabric, complete the following steps. On the Create your first fabric form, fill in the configuration\n parameters for the type of fabric you want to create. For more information, see the\n following topics: AWS Fabric Configuration Parameters Azure Fabric Configuration Parameters GCP Fabric Configuration Parameters On-Premises Fabric Configuration Parameters Click Create . The Fabric details dialog\n box is displayed. For example: To monitor the progress of fabric creation, check the status bar in the Fabric details dialog box, or click See\n details . Fabric creation can take 20 minutes or more. If fabric creation\n is successful, the Data Fabric UI displays a message\n with the endpoint link for the new fabric. Using a browser, navigate to the endpoint link, and sign in to the Data Fabric UI for the newly created fabric using user name admin and password p@ssw0rd . These credentials are the\n default user name and password for Keycloak. IMPORTANT: Hewlett Packard\n Enterprise recommends that you change the default Keycloak password immediately after\n installation. See Changing the Keycloak admin Password . If fabric creation is successful, use the following steps to activate and register the\n fabric. NOTE: After you have successfully created a cloud-based or on-premises fabric, you\n can kill the container that hosts the edf-installer.hpe.com fabric. For\n example, at the Docker command line\n type: % docker kill If an error occurs during fabric creation, see Troubleshoot Fabric Creation later on this page. Activate and Register the Fabric After the first fabric is created, perform these steps: Add the activation key for the fabric. See Adding an Activation Key . Register the fabric. See Registering a Fabric . Set the billing model. See Setting the Billing Model . Configure single sign-on. See SSO Using Keycloak . Troubleshoot Fabric Creation Fabric creation can fail for various reasons. You might see a message like this: To view the log information, click See\n details . For example: Review the log information to determine if the failure is correctable. If you can resolve\n the failure condition, you can retry creating the fabric. However, if you want to specify\n the same fabric name that you specified for the first fabric, you must delete the failed\n fabric before retrying the installation. To delete the failed fabric, click Delete on the Fabric\n details dialog box. Then return to the Create your first\n fabric form, and retry creating the fabric. If you are not able to resolve the issue that caused fabric creation to fail, contact HPE Support . Prerequisites for On-Premises Installation Describes fabric node and user prerequisites for on-premises installation of the HPE Ezmeral Data Fabric . AWS Fabric Configuration Parameters This page describes the configuration values that you need to specify to create a new fabric using Amazon Web Services (AWS). Azure Fabric Configuration Parameters This page describes the configuration values that you need to specify to create a new fabric using Microsoft Azure. GCP Fabric Configuration Parameters This page describes the configuration values that you need to specify to create a new fabric using Google Cloud Platform (GCP). On-Premises Fabric Configuration Parameters This page describes the configuration values that you need to specify to create a new fabric that is hosted on-site. 
Prerequisites for On-Premises Installation

Describes fabric node and user prerequisites for on-premises installation of the HPE Ezmeral Data Fabric.

Fabric Node Requirements for On-Premises Installation

Before deploying on-premises, you must provide the nodes that will host the on-premises fabric. At least one node is required for an on-premises installation. You can provide as many additional nodes as you need. Nodes that you want to include in the fabric must meet the following criteria:

Operating System
The following OS versions are supported:
- RHEL 8.8, 8.6, 8.5, 8.4, 8.3, 8.2, 8.1
- Rocky 8.5, 8.4
- Ubuntu 20.04, 18.04
- SLES 15 SP3, 15 SP2
- OEL 8.4, 8.3, 8.2

Storage
For on-premises nodes, the HPE Ezmeral Data Fabric uses all the storage that is available to it. The platform does not impose limits based on the storage tier you select when creating the fabric. HPE recommends the following minimum number of nodes for each storage tier, but you do not need to follow this guideline precisely. The software does not currently check for a specific number of nodes or a specific amount of storage.

   Storage Tier    Recommended Minimum Number of Nodes
   1 TB            3
   10 TB           5
   100 TB          7
   1 PB            12

Fully Qualified Domain Names (FQDNs)
The nodes must be expressed as fully qualified domain names (FQDNs). DO NOT specify hostnames as aliases or IP addresses.

Disk Space and Software Requirements
Nodes must meet the requirements in the following table. The Installer verifies the requirements prior to installation.

   Table 1. Node Requirements
   Component    Requirements
   CPU          64-bit x86.
   OS           RHEL, Oracle Linux, Rocky, SLES, or Ubuntu.
   Memory       32 GB minimum for nodes in production.
   Disk         Raw, unformatted drives and no partitions.
   DNS          Hostname, reaches all other nodes.
   Users        Common users across all nodes; passwordless ssh (optional).
   Java         Must run Java 11 or 17. Also, the java and javac versions must be the same on all nodes.
   Python       The default Python version must be set to Python 3 on all nodes.
   Other        NTP, Syslog, PAM.

Provide at least 10 GB of free disk space on the operating system partition. Provide 10 GB of free disk space in the /tmp directory and 128 GB of free disk space in the /opt directory. Services such as the ResourceManager and NodeManager use the /tmp directory. Files, such as logs and cores, use the /opt directory.

For data disks, the Installer requires a minimum disk size that is equal to the physical memory on the node. If a data disk does not meet the minimum disk size requirement, a verification error is generated.

Proxy Server Requirements
If nodes in the fabric use an HTTP proxy server, the nodes must also meet the following requirements:

- The no_proxy environment variable must be set. Nodes in the fabric need to be able to communicate without the use of a proxy.
  If the https_proxy and http_proxy environment variables are set for nodes in the fabric, you must also set the no_proxy environment variable for the fabric admin user and the root user on each node. Configure the no_proxy environment variable to the IP range of the nodes or to the sub-domain that contains the nodes. In addition, you must follow this guideline from the Python documentation: "The no_proxy environment variable can be used to specify hosts which shouldn't be reached via proxy; if set, it should be a comma-separated list of hostname suffixes, optionally with :port appended, for example cern.ch,ncsa.uiuc.edu,some.host:8080."

- For cloud-based fabrics (Amazon EC2, Google Compute Engine (GCE), and Microsoft Azure), you must include this entry in the no_proxy configuration: 169.254.169.254

- The global proxy for package repositories must be set. The Installer creates repository files. However, the proxy setting is not configured for each repository. Therefore, configure global proxy settings on each node in the fabric. On CentOS/RedHat, set global proxy settings in /etc/yum.conf. On Ubuntu, set global proxy settings in /etc/apt/apt.conf.

Requirements for Air-Gapped Installations
An air-gapped installation is an on-premises fabric that is disconnected from the internet, typically to enhance security. Before installing an air-gapped installation, you must create local repositories for the Installer to use. See Creating a Local Repository for an Air-Gapped Installation.

User Requirements for On-Premises Installation
On-premises nodes must meet the following requirements for users:
- For all users, the numeric user and group IDs (MAPR_UID and MAPR_GUID) must be configured, and these values must match on all nodes.
- The mapr user and root user must be configured to use bash. Other shells are not supported.
- The user that initiates fabric creation for an on-premises deployment must be present and have the same user name and password on all fabric nodes.
- Using an SSH key is not supported during on-premises fabric creation.
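As an illustration of the proxy guidance above, here is a minimal sketch. The proxy host and port, the subnet, and the domain suffix are placeholders for your environment, not values taken from this documentation:

   # Per-user proxy settings for the fabric admin user and root on each node.
   export http_proxy=http://proxy.example.com:8080
   export https_proxy=http://proxy.example.com:8080
   # IP range or sub-domain of the fabric nodes; 169.254.169.254 is required
   # only for cloud-based fabrics.
   export no_proxy=10.10.15.0/24,.lab.mycompany.net,169.254.169.254

   # Global package-manager proxy, per the requirements above:
   echo 'proxy=http://proxy.example.com:8080' >> /etc/yum.conf                          # CentOS/RedHat
   echo 'Acquire::http::Proxy "http://proxy.example.com:8080";' >> /etc/apt/apt.conf    # Ubuntu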
AWS Fabric Configuration Parameters

This page describes the configuration values that you need to specify to create a new fabric using Amazon Web Services (AWS). Parameters with an asterisk (*) are required. Before you can initiate the Create process, you must specify all required parameters.

Name*
Name of the fabric. Use a name that is unique across all of your fabrics and is from 1 to 40 characters. The name:
- Must start with a letter (either lowercase or uppercase).
- Can contain lowercase letters, uppercase letters, numbers, and hyphens.
- Must not contain consecutive hyphens.
- Must include a letter or a number as the final character.

Provider
The cloud provider on which to create the fabric. Select Amazon Web Services (AWS).

Access key ID*
AWS credential Access Key. The user must have "AmazonEC2FullAccess" permission.

Secret key*
AWS credential Secret Access Key.

Region*
The AWS region in which to provision the fabric.

Storage Tier*
The consumption baseline that you specified in your license for the fabric. Your actual storage consumption can exceed this level. Select from these tiers: 1 TB, 10 TB, 100 TB, 1 PB.

Data-at-rest encryption
Data on disk (or data at rest) on a secure fabric can be encrypted, enabling you to protect the data if a disk is compromised.
Encryption of data at rest not only prevents unauthorized users from accessing sensitive data, but it also protects against data theft via sector-level disk access. Data-at-rest encryption is ON by default.

Nodes
The number of nodes allocated based on the Storage tier you selected. You do not need to specify a number. The nodes are populated automatically.

Virtual Private Cloud (VPC) ID*
The AWS Virtual Private Cloud (VPC) ID to use in the selected region. For example: vpc-0b5177b19511ee301. You must provide a VPC, and the VPC must have an internet gateway attached.

Public subnet ID*
The subnet ID to use in the selected VPC. For example: subnet-0445a49217546b101. The public subnet must be accessible from the internet.
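Before filling in the form, you can optionally sanity-check the VPC, subnet, and internet gateway with the AWS CLI. This is not part of the documented procedure; the region is a placeholder, and the IDs reuse the examples above:

   # Confirm the VPC and subnet exist and that an internet gateway is attached.
   aws ec2 describe-vpcs --region us-east-1 --vpc-ids vpc-0b5177b19511ee301
   aws ec2 describe-subnets --region us-east-1 --subnet-ids subnet-0445a49217546b101
   aws ec2 describe-internet-gateways --region us-east-1 \
       --filters Name=attachment.vpc-id,Values=vpc-0b5177b19511ee301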
Azure Fabric Configuration Parameters

This page describes the configuration values that you need to specify to create a new fabric using Microsoft Azure. Parameters with an asterisk (*) are required. Before you can initiate the Create process, you must specify all required parameters.

Name*
Name of the fabric. Use a name that is unique across all of your fabrics and is from 1 to 40 characters. The name:
- Must start with a letter (either lowercase or uppercase).
- Can contain lowercase letters, uppercase letters, numbers, and hyphens.
- Must not contain consecutive hyphens.
- Must include a letter or a number as the final character.

Provider
The cloud provider on which to create the fabric. Select Azure.

Azure tenant ID*
The ID of the Azure tenant. For information about how to obtain the ID, see this website. The tenant must be accessible from the internet.

Subscription ID*
The Azure subscription ID. Azure tenants can have one or more subscriptions, which are agreements with Microsoft to use Azure services. Every Azure resource is associated with a subscription. For information about how to obtain the ID, see this website. The subscription must have an attached internet gateway.

Client ID*
The ID of the Azure client (application) in the Active Directory. For information about how to obtain the ID, see this website. The client must be accessible from the internet.

Client secret*
The Azure client (application) secret in the Active Directory. For information about how to obtain the secret, see this website. The client secret must be accessible from the internet.

Region*
The Azure region in which to provision the fabric.

Storage Tier*
The consumption baseline that you specified in your license for the fabric. Your actual storage consumption can exceed this level. Select from these tiers: 1 TB, 10 TB, 100 TB, 1 PB.

Data-at-rest encryption
Data on disk (or data at rest) on a secure fabric can be encrypted, enabling you to protect the data if a disk is compromised. Encryption of data at rest not only prevents unauthorized users from accessing sensitive data, but it also protects against data theft via sector-level disk access. Data-at-rest encryption is ON by default.

Nodes
The number of nodes allocated based on the Storage tier you selected. You do not need to specify a number. The nodes are populated automatically.

Resource group name*
The name of the Azure resource group. The resource group is a container that comprises multiple resources and facilitates the management of those resources.

Virtual network*
The name of the Azure Virtual Network (VNet).

Subnetwork*
The name of the subnet in your virtual network to be used for the fabric.
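The Azure CLI can surface the tenant and subscription IDs that the form asks for. This is an optional aid, not part of the documented procedure; the resource-group and VNet names are placeholders:

   # Look up the IDs for the current login, then list candidate subnets.
   az account show --query tenantId --output tsv    # Azure tenant ID
   az account show --query id --output tsv          # Subscription ID
   az network vnet subnet list --resource-group <resource-group> \
       --vnet-name <vnet-name> --output table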
GCP Fabric Configuration Parameters

This page describes the configuration values that you need to specify to create a new fabric using Google Cloud Platform (GCP). Parameters with an asterisk (*) are required. Before you can initiate the Create process, you must specify all required parameters.

Name*
Name of the fabric. Use a name that is unique across all of your fabrics and is from 1 to 40 characters. The name:
- Must start with a lowercase letter.
- Can contain lowercase letters, numbers, and hyphens.
- Must not contain consecutive hyphens.
- Must include a lowercase letter or a number as the final character.

Provider
The cloud provider on which to create the fabric. Select Google Cloud Platform (GCP).

Service account key file*
A file containing your GCP service account credentials. For more information, see Create and delete service account keys.

Zone*
The GCP zone in which to provision the fabric.

Storage Tier*
The consumption baseline that you specified in your license for the fabric. Your actual storage consumption can exceed this level. Select from these tiers: 1 TB, 10 TB, 100 TB, 1 PB.

Data-at-rest encryption
Data on disk (or data at rest) on a secure fabric can be encrypted, enabling you to protect the data if a disk is compromised. Encryption of data at rest not only prevents unauthorized users from accessing sensitive data, but it also protects against data theft via sector-level disk access. Data-at-rest encryption is ON by default.

Nodes
The number of nodes allocated based on the Storage tier you selected. You do not need to specify a number. The nodes are populated automatically.

VPC network*
The identifier for the VPC. The VPC must have an internet gateway attached.

Subnetwork*
The identifier for the public subnet.
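If you still need to generate the service account key file, a hedged sketch using the gcloud CLI; the service-account address, file name, and region are placeholders, and this is not part of the documented procedure:

   # Create a key file for an existing service account, then list candidate
   # subnets for the VPC network and Subnetwork fields.
   gcloud iam service-accounts keys create sa-key.json \
       --iam-account=<sa-name>@<project-id>.iam.gserviceaccount.com
   gcloud compute networks subnets list --regions=us-central1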
On-Premises Fabric Configuration Parameters

This page describes the configuration values that you need to specify to create a new fabric that is hosted on-site. Parameters with an asterisk (*) are required. Before you can initiate the Create process, you must specify all required parameters.

Creating an on-premises fabric requires that you provide host nodes before starting fabric creation. These nodes must meet certain prerequisites. Before creating an on-premises fabric, review Prerequisites for On-Premises Installation.

Name*
Name of the fabric. Use a name that is unique across all of your fabrics and is from 1 to 40 characters. The name:
- Must start with a letter (either lowercase or uppercase).
- Can contain lowercase letters, uppercase letters, numbers, and hyphens.
- Must not contain consecutive hyphens.
- Must include a letter or a number as the final character.

Provider
The cloud provider on which to create the fabric. Select On-premises.

Username*
The SSH username.

Password*
The SSH password.

Airgap repository
The repository for the Installer to use if your installation cannot access the internet. The repository must contain nested folders. For example: ./installer/redhat. You must create this repository before installing an air-gapped fabric. See Creating a Local Repository for an Air-Gapped Installation.

Data-at-rest encryption
Data on disk (or data at rest) on a secure fabric can be encrypted, enabling you to protect the data if a disk is compromised. Encryption of data at rest not only prevents unauthorized users from accessing sensitive data, but it also protects against data theft via sector-level disk access. Data-at-rest encryption is ON by default.
Nodes*
The recommended minimum number of nodes that should be allocated. The form provides this information based on the Storage tier you selected.

Node FQDN
The fully qualified domain name of a node that will host the fabric. This is a required field. Specify the FQDN of a node that you provided, as described in Prerequisites for On-Premises Installation. For example: mynode.lab.mycompany.net. Use fully qualified domain names (FQDNs). DO NOT specify hostnames as aliases or IP addresses. If you are using multiple nodes, click the Add node button to add as many additional nodes as you provisioned.

EDF subnet
The Data Fabric subnet. This parameter is optional. This parameter allows you to set a subnet mask to restrict fabric services to a subset of network interface cards (NICs). Specify one or more comma-separated subnet masks. For example: 10.10.15.0/24,10.10.16.0/24

EDF external
The Data Fabric external IP addresses for the CLDB, file system, and MAST Gateway nodes. This parameter is optional. This parameter allows you to designate a specific IP of the host as a public IP address to handle the external traffic targeted to the host. Specify a comma-separated list of tuples using the format <hostname>:<external-IP>. For example: host1.corp.net:1.1.1.1,host2.corp.net:1.1.1.2,host3.corp.net:1.1.1.3
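A quick optional check, not part of the documented procedure, that a node satisfies the FQDN rule before you enter it in the Node FQDN field; the node name reuses the example above:

   # On the node itself, confirm it reports a fully qualified name.
   hostname -f
   # From another fabric node, confirm the FQDN resolves and is reachable.
   getent hosts mynode.lab.mycompany.net
   ping -c 1 mynode.lab.mycompany.net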
Creating a Local Repository for an Air-Gapped Installation

Describes how to make installation packages available through a local repository for an air-gapped installation.

You can set up a local repository on each node to provide access to installation packages. With this method, nodes do not require internet connectivity. The package manager on each node installs from packages in the local repository. To set up a local repository, nodes need access to a running web server to download the packages.

The following topics describe how to create and use a local repository on each supported operating system:
- Creating a Local Repository on RHEL, Rocky, or Oracle Linux
- Creating a Local Repository on SLES
- Creating a Local Repository on Ubuntu
Creating a Local Repository on RHEL, Rocky, or Oracle Linux

Describes how to create and use a local repository for RHEL, Rocky, or Oracle Linux.

Ensure that you have access to the HPE internet repository so that you can download package files. For more information, see Accessing the HPE Ezmeral Token-Authenticated Internet Repository.

1. On the machine where you will set up the repository, log in as root or use sudo.
2. Create the following directory if it does not exist: /var/www/html/yum/base

3. On a computer that is connected to the internet, download the following files, substituting the appropriate <version> number and <datestamp>:

   https://package.ezmeral.hpe.com/releases/v7.x.x/redhat/mapr-v<version>GA.rpm.tgz
   https://package.ezmeral.hpe.com/releases/MEP/MEP-<version>/redhat/mapr-mep-v<version>.<datestamp>.rpm.tgz

4. Copy the files to /var/www/html/yum/base on the node, and extract them there:

   tar -xvzf mapr-v<version>GA.rpm.tgz
   tar -xvzf mapr-mep-v<version>.<datestamp>.rpm.tgz

5. Create the base repository headers by using the following command:

   createrepo /var/www/html/yum/base

   HPE software assumes that the web server root directory is /var/www/html.

6. When finished, verify the content of the new /var/www/html/yum/base/repodata directory: filelists.xml.gz, other.xml.gz, primary.xml.gz, repomd.xml
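The steps above build the repository but do not cover pointing fabric nodes at it. One way to consume it, sketched under the assumption that the nodes can reach the web server over HTTP; the host name is a placeholder, and disabling GPG checks is a site-policy decision, not taken from this documentation:

   # Register the local repository on a RHEL-family fabric node.
   cat > /etc/yum.repos.d/mapr-local.repo <<'EOF'
   [mapr-local]
   name=HPE Ezmeral Data Fabric local repository
   baseurl=http://webserver.example.net/yum/base
   enabled=1
   gpgcheck=0
   EOF
   yum clean all && yum makecache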
Creating a Local Repository on SLES

Describes how to create and use a local repository for SLES.

Ensure that you have access to the HPE internet repository so that you can download package files. For more information, see Accessing the HPE Ezmeral Token-Authenticated Internet Repository.

1. On the machine where you will set up the repository, log in as root or use sudo.

2. Create the following directory if it does not exist: /var/www/html/zypper/base

3. On a computer that is connected to the internet, download the following files, substituting the appropriate <version> and <datestamp>:

   https://package.ezmeral.hpe.com/releases/v<version>/suse/mapr-v<version>GA.rpm.tgz
   https://package.ezmeral.hpe.com/releases/MEP/MEP-<version>/suse/mapr-mep-v<version>.<datestamp>.rpm.tgz

4. Copy the files to /var/www/html/zypper/base on the node, and extract them there:

   tar -xvzf mapr-v<version>GA.rpm.tgz
   tar -xvzf mapr-mep-v<version>.<datestamp>.rpm.tgz

5. Create the base repository headers:

   createrepo /var/www/html/zypper/base

6. When finished, verify the content of the new /var/www/html/zypper/base/repodata directory: filelists.xml.gz, other.xml.gz, primary.xml.gz, repomd.xml
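As with the RHEL-family procedure, consuming the repository from a SLES fabric node is not covered above; a minimal sketch, with the web-server host as a placeholder and GPG checking disabled as a site-policy choice:

   # Register and refresh the local repository on a SLES fabric node.
   zypper addrepo -G http://webserver.example.net/zypper/base mapr-local
   zypper refresh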
Creating a Local Repository on Ubuntu

Describes how to create and use a local repository for Ubuntu.

Ensure that you have access to the HPE internet repository so that you can download package files. For more information, see Accessing the HPE Ezmeral Token-Authenticated Internet Repository.

1. On the machine where you will set up the repository, log in as root.
2. Change to the directory /root, and create the following directories within it:

   ~/mapr
   |---dists
   |------binary
   |---------optional
   |------------binary-amd64
   |---mapr

3. On a computer that is connected to the internet, download the following files, substituting the appropriate <version> and <datestamp>:

   https://package.ezmeral.hpe.com/releases/v7.x.x/ubuntu/mapr-v<version>GA.deb.tgz
   https://package.ezmeral.hpe.com/releases/MEP/MEP-<version>/ubuntu/mapr-mep-v<version>.<datestamp>.deb.tgz

4. Copy the files to /root/mapr/mapr on the node, and extract them there:

   tar -xvzf mapr-v<version>GA.deb.tgz
   tar -xvzf mapr-mep-v<version>.<datestamp>.deb.tgz

5. Navigate to the /root/mapr directory.

6. Use dpkg-scanpackages to create Packages.gz in the binary-amd64 directory:

   dpkg-scanpackages . /dev/null | gzip -9c > ./dists/binary/optional/binary-amd64/Packages.gz

7. Move the entire /root/mapr/mapr directory to the default directory served by the HTTP server (for example, /var/www), and make sure the HTTP server is running.
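One way for Ubuntu fabric nodes to consume the repository laid out above; the "binary" distribution and "optional" component match the directory tree from step 2. The web-server host is a placeholder, and [trusted=yes] skips signature checks, which is a site-policy decision, not from this documentation:

   # Add an apt source for the local repository, then refresh the index.
   echo 'deb [trusted=yes] http://webserver.example.net/mapr binary optional' \
       > /etc/apt/sources.list.d/mapr-local.list
   apt-get update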
Troubleshooting Seed Node Installation

Describes some common issues that can interfere with seed node installation.

Issue: Seed Node Container Services Do Not Come Up

Use these steps to resolve the problem:

1. After running the script to deploy the container for the Data Fabric image, wait for at least 10 minutes for services to come up. If the services do not come up, the screen can display a message such as the following:

   7.6.0-mapr-devdocker-container % ./datafabric_container_setup.sh
   Please enter the local sudo password for <user>
   Password:

   latest: Pulling from maprtech/edf-seed-container
   Digest: sha256:052f461d98b1d0b8251cd47bab71b42103e61aaaa33d31335d3ca60182f4a87e
   Status: Image is up to date for maprtech/edf-seed-container:latest
   docker.io/maprtech/edf-seed-container:latest
   Developer Sandbox Container b4be66858760 is running..
   services required for Ezmeral Data fabric are coming up
   services required for Ezmeral Data fabric are coming up
   services required for Ezmeral Data fabric are coming up

   services didnt come up in stipulated 10 mins time
   please login to the container using ssh root@localhost -p 2222 with mapr as password and check further
   For documentation on steps to debug, see https://docs.ezmeral.hpe.com/datafabric/home/installation/troubleshooting_seed_node_installation.html
   once all services are up fabric UI is available at https://<IP-address>:8443/app/dfui and fabrics can be deployed from that page

2. Sign in to the Docker container using mapr as the password:

   ssh root@localhost -p 2222

3. Enter the jps command and check the output. Continue entering the jps command until the command shows the AdminApplication java process, which indicates that all the services are started:

   root@edf-installer:~# jps
   71136 FsShell
   71315 Jps
   19349 AdminApplication
   10024 WardenMain
   14236 CLDB
   13213 QuorumPeerMain

4. If the services do not start, check that sufficient resources have been allocated to the seed node. See the "Seed Node Prerequisites" in Fabric Deployment Using a Seed Node. You might need to allocate more resources and retry installing the seed node.
Check to see if the Warden and ZooKeeper services are up and running:\nsystemctl status mapr-warden\nsystemctl status mapr-zookeeper\nIf the services did not start within 10 minutes, check the following logs for errors or exceptions. If the logs contain errors or exceptions, contact HPE Support. If there are no errors or exceptions, restart the services manually as shown in the next step:\n/opt/mapr/logs/cldb.log\n/opt/mapr/logs/configure.log\n/opt/mapr/logs/warden.log\n/opt/mapr/apiserver/logs/apiserver.log\n/opt/mapr/zookeeper/zookeeper-3.5.6/logs/zookeeper.log\nIf the Warden and ZooKeeper services are not up and running, try restarting the services manually:\nsystemctl start mapr-zookeeper\nsystemctl start mapr-warden (Topic last modified: 2024-01-19)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/troubleshooting_seed_node_installation.html",
+ "title": "Troubleshooting Seed Node Installation"
+ },
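The wait-then-check cycle above is easy to script. A minimal sketch, assuming the documented ssh endpoint (root@localhost, port 2222, password mapr) and sshpass for non-interactive login; sshpass and the 10-minute budget are illustrative choices, not part of the product:

```bash
#!/usr/bin/env bash
# Poll the seed container until jps reports AdminApplication, i.e. until
# all Data Fabric services are up (assumes sshpass is installed).
set -euo pipefail

for i in $(seq 1 60); do                      # ~10 minutes at 10 s per try
  if sshpass -p mapr ssh -p 2222 -o StrictHostKeyChecking=no \
       root@localhost jps 2>/dev/null | grep -q AdminApplication; then
    echo "All services are up."
    exit 0
  fi
  sleep 10
done

echo "Services did not come up; check /opt/mapr/logs inside the container." >&2
exit 1
```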
+ {
+ "content": "\nPlanning Worksheet for Cloud Deployments Print this worksheet, and use it to record configuration information for your cloud deployment. For installation information, see Fabric Deployment Using a Seed Node.\nHost and Networking Configuration Information (Description / Value):\nSeed node host name:\nNetwork Interface Card (NIC) name:\nProxy settings: export http_proxy= / export https_proxy= / export HTTP_PROXY= / export HTTPS_PROXY=\nAWS Information (Parameter / Value):\nName:\nProvider: Amazon Web Services (AWS)\nAccess key ID:\nSecret key:\nRegion:\nStorage Tier:\nData-at-rest encryption:\nVirtual Private Cloud (VPC) ID:\nPublic subnet ID:\nAzure Information (Parameter / Value):\nName:\nProvider: Azure\nAzure tenant ID:\nSubscription ID:\nClient ID:\nClient secret:\nRegion:\nStorage Tier:\nData-at-rest encryption:\nResource group name:\nVirtual network:\nSubnetwork:\nGCP Information (Parameter / Value):\nName:\nProvider: Google Cloud Platform (GCP)\nService account key file:\nZone:\nStorage Tier:\nData-at-rest encryption:\nVPC network:\nSubnetwork: (Topic last modified: 2024-01-19)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/planning_worksheet_cloud.html",
+ "title": "Planning Worksheet for Cloud Deployments"
+ },
+ {
+ "content": "\nHelp for datafabric_container_setup.sh From the Docker command line, you can access the help text for the datafabric_container_setup.sh script. To view the help for the setup script, use the ./datafabric_container_setup.sh -h command:\n% ./datafabric_container_setup.sh -h\nThis script will take care of deploying edf on the seed node.\n\nSyntax: ./datafabric_container_setup.sh [-i|--image] [-p|--publicipv4dns] [-f|--proxyfiledetails]\noptions:\n-i|--image this is optional. By default it will pull the image having the latest tag;\n you can also provide an image that has a custom tag, example: maprtech/edf-seed-container:7.4.0_9.1.2\n-p|--publicipv4dns is the public IPv4 DNS and is needed for cloud-deployed seed nodes. Note that both inbound and outbound traffic on port 8443\n needs to be enabled on the cloud instance. Otherwise, the Data Fabric UI is not accessible.\n-f|--proxyfiledetails is the location of the file from which proxy details provided by the user are copied to the docker container.\nNormally, using the -i|--image option is not needed. You may provide the -i option if you want to specify a specific image. If you do not provide the option, the latest available image is downloaded. The -f|--proxyfiledetails option allows you to specify proxy information in a file. On Linux nodes, if you do not provide the -f option, the contents of /etc/profile.d/proxy.sh and /etc/environment are appended and copied to the container. On a Mac seed node, if you do not provide the -f option, no proxy details are copied.
(Topic last modified: 2023-10-30)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/help_for_script.html",
+ "title": "Help for datafabric_container_setup.sh"
+ },
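Putting those options together, a hypothetical invocation on a proxied, cloud-hosted Linux seed node might look like the sketch below; the image tag, DNS name, and proxy values are placeholders, and only flags documented in the help text above are used:

```bash
# Hypothetical proxy file passed via -f (values are placeholders).
cat > /tmp/proxy.txt <<'EOF'
export http_proxy=http://proxy.example.com:8080
export https_proxy=http://proxy.example.com:8080
EOF

# Pull a specific image tag and pass the instance's public IPv4 DNS.
./datafabric_container_setup.sh \
  -i maprtech/edf-seed-container:7.4.0_9.1.2 \
  -p ec2-0-0-0-0.compute-1.amazonaws.com \
  -f /tmp/proxy.txt
```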
+ {
+ "content": "\nService Activation and Billing Describes how to activate and register a new fabric to take advantage of automated billing. When you install HPE Ezmeral Data Fabric software, you have the option to install in a connected environment or an air-gapped environment (Environment / Description):\nConnected: An environment that has continuous internet access.\nAir-gapped: An environment that has no internet access, usually for the purpose of increasing security.\nThe activation and billing processes differ for each type of installation. In a connected environment, billing is an automated process. In an air-gapped environment, the billing process is manual and requires an activation code. The following sections describe what you need to do to activate your Data Fabric and enable billing to keep the fabric operational. (Topic last modified: 2023-08-20)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/service_activation_and_billing.html",
+ "title": "Service Activation and Billing"
+ },
+ {
+ "content": "\nAdding an Activation Key You must add an activation key after installing the HPE Ezmeral Data Fabric or adding a new fabric. An activation key is provided when you purchase a consumption-based license. Adding the activation key enables the Data Fabric UI to display important details about your license, including the start date and expiration dates. You must be a fabric manager to add an activation key. To add an activation key:\n1. Sign in to the Data Fabric UI, and switch to the Fabric manager experience.\n2. Click the Fabric administration button.\n3. On the Activation card, click Add activation key. The Add activation key form appears.\n4. For the Input type, select File or Text.\n5. If you selected File as the Input type, drag and drop the activation key file into the box on the form. Or, click Select File to navigate to the file and select it. If you selected Text as the Input type, copy and paste your activation key information into the box on the form.\n6. Click Add. Your activation details become visible on the Activation card.\nMore information: Obtaining a License; Registering a Fabric; Setting the Billing Model. (Topic last modified: 2024-01-16)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/licensing/adding_an_activation_key.html",
+ "title": "Adding an Activation Key"
+ },
+ {
+ "content": "\nRegistering a Fabric You register the HPE Ezmeral Data Fabric after installing the fabric or adding a new fabric. Registration provides HPE with information about your internet connection and determines the billing process that you will use. You must be a fabric manager to register a fabric. If your fabric is in an air-gapped environment, you must obtain an activation code, which is provided when you purchase a consumption-based license. To obtain a license, see Obtaining a License. If your fabric is behind a proxy, registration for the Connected mode can fail unless you first configure the proxy server. See Configuring a Proxy Server for Data Fabric Access to the Internet. To register a fabric:\n1. Sign in to the Data Fabric UI, and switch to the Fabric manager experience.\n2. Click the Fabric administration button.
3. On the Activation card, click Actions, and select Register fabric. The Register fabric form appears.\n4. Select the Operational mode as Air-gapped or Connected (Mode / Description):\nAir-gapped: The fabric is not connected to the Internet. In the air-gapped mode, you must provide an activation code to the Data Fabric UI.\nConnected: The fabric is connected to the Internet and can communicate with the HPE billing service. In the connected mode, the system automatically communicates an activation code to HPE.\n5. For an air-gapped deployment, upload the activation code file that you received when you purchased your license. You can drag and drop the file into the box on the form, or click Select File to navigate to the code file.\n6. Click Register. Your registration details become visible on the Activation card.\nMore information: Setting the Billing Model; Viewing Activation Information. (Topic last modified: 2023-11-03)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/licensing/registering_a_fabric.html",
+ "title": "Registering a Fabric"
+ },
+ {
+ "content": "\nSetting the Billing Model Setting the billing model enables the Data Fabric UI to display estimated billing charges for each fabric. Setting the billing model is optional. However, if you set the billing model, the Data Fabric UI can show estimates of your aggregated and on-demand billing charges on the Billing and Storage Consumption card. See View Billing Data by Fabric. To set the billing model, you must enter information provided to you by HPE when you purchased your license for the fabric. Use these steps to provide the information:\n1. Sign in to the Data Fabric UI, and switch to the Fabric manager experience.\n2. Click the Fabric administration button.\n3. On the Activation card, click Set billing model. The Set billing model form appears.\n4. Fill in the form as follows (an asterisk (*) denotes required fields):\nCommit amount*: The minimum amount of storage that you committed to purchase in your Data Fabric license.\nUnit: The units for the commit amount. Available units are: TB (terabytes), PB (petabytes), EB (exabytes), ZB (zettabytes).\nCommit rate*: The monthly storage charge in dollars ($) per GB hour that you committed to when you purchased your license. To ensure that the UI provides accurate estimates, be sure to factor in any discount in the Commit rate that you received from HPE.\nOn-demand rate*: The rate in dollars ($) per GB hour for storage use in excess of your commit amount. To ensure that the UI provides accurate estimates, be sure to factor in any discount in the On-demand rate that you received from HPE.\n5. Click Save. (Topic last modified: 2024-01-16)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/licensing/setting_billing_model.html",
+ "title": "Setting the Billing Model"
+ },
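The commit/on-demand split suggests a simple two-tier estimate. The sketch below is one plausible reading of these fields, for illustration only; the formula is an assumption rather than the UI's documented calculation, and all rates and usage figures are made up:

```bash
# Illustrative two-tier storage-cost estimate (assumed model, placeholder numbers):
#   charge = commit_rate * min(usage, commit) * hours
#          + on_demand_rate * max(usage - commit, 0) * hours
awk 'BEGIN {
  commit_gb = 10 * 1024 * 1024      # 10 PB commit, expressed in GB
  usage_gb  = 12 * 1024 * 1024      # average measured usage, in GB
  hours     = 720                   # one 30-day month
  commit_rate    = 0.000010         # $ per GB-hour (placeholder)
  on_demand_rate = 0.000015         # $ per GB-hour (placeholder)
  base   = commit_rate * (usage_gb < commit_gb ? usage_gb : commit_gb) * hours
  excess = usage_gb > commit_gb ? on_demand_rate * (usage_gb - commit_gb) * hours : 0
  printf "estimated monthly charge: $%.2f\n", base + excess
}'
```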
+ {
+ "content": "\nViewing Activation Information Use the Data Fabric UI to view important activation information, such as the status of your activation key and activation code (for air-gapped installations). To view activation information:\n1. Sign in to the Data Fabric UI, and switch to the Fabric manager experience.\n2. Click the Fabric administration button.\n3. Locate the Activation card, which displays detailed information about your Activation code (for air-gapped installations) and Activation key. (Topic last modified: 2024-01-16)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/licensing/viewing_activation_information.html",
+ "title": "Viewing Activation Information"
+ },
+ {
+ "content": "\nDisplaying the Fabric ID Describes how to display the fabric ID. You must supply your fabric ID in order to obtain a license. Each fabric has a unique fabric ID (sometimes also referred to as a \u201ccluster ID\u201d). Before you apply an activation key to your fabric, you can display the fabric ID by using the following command at a Linux command prompt:\n$ cat /opt/mapr/conf/clusterid\n4626587677795940777\nAfter you add a valid activation key, the Data Fabric UI displays the fabric ID in the Fabric administration page. To view Fabric administration information:\n1. Sign in to the Data Fabric UI, and switch to the Fabric manager experience.\n2. Click the Fabric administration button. (Topic last modified: 2023-10-22)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/licensing/displaying_fabric_id.html",
+ "title": "Displaying the Fabric ID"
+ },
+ {
+ "content": "\nObtaining a License Describes the process of obtaining a consumption-based license from the My HPE Software Center. To obtain a license:\n1. For new deployments, install the fabric as described in Installation. Or, for existing deployments, create a new fabric as described in Creating a Fabric, or import a fabric as described in Importing a Fabric.\n2. Note the ID of the new fabric. To obtain a license, you must supply the fabric ID. See Displaying the Fabric ID.\n3. After purchasing HPE Ezmeral Data Fabric software, a license key is made available to you through the Access Your Products button in the HPE Subscription Electronic Receipt email that you receive from HPE. This receipt will direct you to MY HPE SOFTWARE CENTER, where you can activate your product.\n4. Log in to MY HPE SOFTWARE CENTER with your HPE Passport user ID and password. You should see an Activate EON: page.
5. In the Qty to Activate field, specify 1. Click Confirm Selection.\n6. In Step 2: Designate Activatee, click Next.\n7. In Step 3, enter your cluster ID (fabric ID). Click Activate. The activation process can take several minutes to complete. Eventually, the HPE Ezmeral Data Fab SW Base SaaS page is displayed.\n8. Click the box to accept the license terms and authorizations.\n9. Click the box for Licenses Keys (3). Click Download. The licenses are downloaded as .DAT files.\nYou can now add an activation key to your fabric. See Adding an Activation Key. (Topic last modified: 2023-07-23)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/licensing/obtaining_a_license.html",
+ "title": "Obtaining a License"
+ },
+ {
+ "content": "\nBilling in Connected Environments Describes how billing is enabled in a connected environment.\nAutomatic Billing: A connected environment is an installation in which the fabric is connected to the internet and can communicate with the HPE billing service. In a connected environment, billing and activation are initiated automatically after you add an activation key and register a fabric.\nViewing the Billing and Consumption Information for a Connected Fabric: For a connected environment, the Data Fabric UI displays billing and consumption information on the Billing and Storage Consumption card. Note that cost information is estimated. This information is based on the rates you specified in Setting the Billing Model. The actual cost reflected on the billing portal might be different. To view the billing information: Sign in to the Data Fabric UI, and switch to the Infrastructure admin view or Fabric manager view. Click Fabric metrics. Scroll down to see the Billing and Storage Consumption card.\nViewing the Operational Mode for a Connected Fabric: The operational mode of a fabric refers to the internet connection status, which can be Connected or Air-gapped. To view the operational mode: Sign in to the Data Fabric UI, and switch to the Fabric manager view. Click Fabric administration. Locate the Activation card. The Activation code section of the card displays the operational mode.\nIf You Forget to Pay Your Invoice: If you forget to pay your invoice or fail to renew an expired license, a connected fabric can be disabled by HPE. If you suspect that the fabric has been disabled, contact HPE Support. To restore the fabric, see Restoring a Disabled Fabric. (Topic last modified: 2023-11-11)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/service_activation_connected.html",
+ "title": "Billing in Connected Environments"
+ },
+ {
+ "content": "\nBilling in Air-Gapped Environments Describes how billing is enabled in an air-gapped environment. In an air-gapped environment, manual steps are needed to support billing and activation for a consumption-based fabric. This section describes how to activate an air-gapped fabric and keep the fabric operational.\nUsing maprcli Commands in an Air-Gapped Environment: Some tasks for keeping an air-gapped fabric operational require you to use maprcli commands. This is because certain operations are not currently available in the Data Fabric UI. The maprcli commands you need are provided on this page. To run maprcli commands, use an ssh connection to any node in the fabric.\nUnderstanding the Activation Code and Billing Cycle: When you place an order for the HPE Ezmeral Data Fabric and specify an air-gapped environment, HPE provides you with an activation code. The activation code allows you to register the product and sign usage records for one billing cycle. The billing cycle is one month with a 15-day grace period. The activation code has two important dates:\nStart Date \u2013 The first day of the one-month billing cycle.\nEnd Date \u2013 The end of the month-long billing cycle and the start of the grace period. This date is usually 30 days after the Start Date.\nYou can view these dates by using the Data Fabric UI: Sign in to the Data Fabric UI, and switch to the Fabric manager view. Click Fabric administration.
Locate the Activation card. The Activation code section of the card displays the start date, end date, and current month charges. Once the activation code is applied, the code is valid (and the fabric is operational) until the End Date. After the End Date, a short grace period is applied to allow you to perform the steps to maintain activation.\nSteps for Maintaining Activation: For an air-gapped environment, the fabric administrator must perform the following steps to keep the fabric activated (Step / When):\n1. Add an activation key. (Installation time)\n2. Register the fabric. (Installation time)\n3. Collect usage records. (Monthly)\n4. Send a usage record file to HPE. (Monthly)\n5. Pay the monthly HPE invoice. (Monthly)\n6. Renew your activation to keep the fabric operational. (Monthly)\n1. Add an Activation Key: Regardless of your fabric's operational mode, you must obtain a license and add the activation key from the license by using the Data Fabric UI. See Adding an Activation Key.\n2. Register a New Air-Gapped Fabric: For a new air-gapped fabric, you must register the fabric by selecting the operational mode and uploading the activation code provided by HPE Support when you ordered the product. See Registering a Fabric. After registration, the fabric is usable for a month with a 15-day grace period. The fabric continues to be usable as long as you continue to pay your monthly bill and reapply new activation keys.\n3. Collect Usage Records: On or near the first day of each month, the fabric administrator should collect usage records for the previous month. The following maprcli command collects your usage records in a file named usage_file.txt:\nmaprcli cluster getbillingusage -fileName usage_file.txt -clearText true\nLicensing for the HPE Ezmeral Data Fabric is consumption based, meaning that you are charged based on actual usage. Usage is measured in storage-hour units. For both connected and air-gapped Data Fabrics, the container location database (CLDB) collects usage metrics. The CLDB analyzes the logical data size of all volumes in the fabric and records the average consumption for each hour at the end of the hour. Even before the fabric is activated and billing is enabled, you can view the recorded metrics by using the following maprcli command:\nmaprcli cluster getmeteringusage -from <start date,time> -till <end date,time> -clearText true\nFor example:\n$ maprcli cluster getmeteringusage -from 2021-01-01,00:00 -till 2023-05-13,00:00 -clearText true \nuserdata metadata total epoch timestamp \n1 Mb 0 Mb 1 Mb 1683716400000 Wed May 10 11:00:00 UTC 2023 \n1 Mb 0 Mb 1 Mb 1683720000000 Wed May 10 12:00:00 UTC 2023 \n1 Mb 0 Mb 1 Mb 1683723600000 Wed May 10 13:00:00 UTC 2023 \n1 Mb 0 Mb 1 Mb 1683727200000 Wed May 10 14:00:00 UTC 2023 \n1 Mb 0 Mb 1 Mb 1683730800000 Wed May 10 15:00:00 UTC 2023\n4. Send the Usage Record File to HPE: To share your usage record file and obtain a new activation code (every 30 days), complete the following steps: Open a support case at https://support.hpe.com using the account you have on the HPE Support Center customer portal, and include the following information: Fabric ID (cluster ID); Current activation code; Usage record file. When HPE Support updates the ticket, go to your customer portal to get the new activation key.\n5. Pay Your Monthly Invoice: Each month you must pay the HPE invoice before the 15-day grace period ends. Otherwise, the fabric can be disabled, as described in Restoring a Disabled Fabric.
6. Renew Your Activation: As long as you continue to provide usage records and pay your monthly invoice within the billing grace period, HPE will continue to provide an activation code that allows you to renew your activation. After obtaining the new activation code from the customer portal, use the following maprcli command to renew your activation:\nmaprcli cluster startup set -activationkey <activation key>\nFor example:\nmaprcli cluster startup set -activationkey /tmp/Renew_key_mycluster.text -is_file true -json\nLog Information: To view log information for service activation and billing, see the main CLDB log: /opt/mapr/logs/cldb.log (Topic last modified: 2023-11-12)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/service_activation_air_gapped.html",
+ "title": "Billing in Air-Gapped Environments"
+ },
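The monthly air-gapped cycle described above reduces to two documented maprcli commands plus a manual hand-off. A minimal sketch; the file names are placeholders, and the support-case upload remains a manual step:

```bash
#!/usr/bin/env bash
# Monthly air-gapped billing routine (run on a fabric node; file names are placeholders).
set -euo pipefail

# 1. Collect last month's usage records.
maprcli cluster getbillingusage -fileName usage_file.txt -clearText true

# 2. Attach usage_file.txt, the fabric ID, and the current activation code
#    to a support case at https://support.hpe.com (manual step).

# 3. Once HPE returns a new activation code, apply it to renew activation.
maprcli cluster startup set -activationkey /tmp/Renew_key_mycluster.text \
  -is_file true -json
```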
+ { + "content": "\nRestoring a Disabled Fabric Describes how to obtain an activation key to restore a disabled fabric. If you forget to pay your invoice, a fabric can be disabled by HPE. If your contract terms are not met, HPE activates a \"kill switch\" that causes the CLDBs to restart, eventually causing the fabric to enter a non-functional state. To check the fabric status, use the maprcli cluster services status command. For example: $ maprcli cluster services status -json\n{\n \"timestamp\":1691100826677,\n \"timeofday\":\"2023-08-03 03:13:46.677 GMT-0700 PM\",\n \"status\":\"OK\",\n \"total\":1,\n \"data\":[\n {\n \"status\":\"ENABLED\"\n }\n ]\n} If you suspect that the fabric has been disabled, contact HPE Support, which can supply a special activation key that you can use to restore the fabric. (Topic last modified: 2023-11-12)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/licensing/restoring_a_disabled_fabric.html", + "title": "Restoring a Disabled Fabric" + },
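A minimal sketch of an automated status check built on the command above; the grep pattern simply matches the ENABLED status shown in the sample output:

```
#!/bin/bash
# Warn if the fabric status is no longer ENABLED (e.g., after the "kill switch" fires).
if maprcli cluster services status -json | grep -q '"status":"ENABLED"'; then
  echo "Fabric is enabled."
else
  echo "Fabric may be disabled; contact HPE Support." >&2
fi
```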
+ { + "content": "\nDisplaying a maprcli Prompt You can use maprcli commands to register the fabric and perform certain configuration tasks. The steps for displaying a maprcli prompt are the same for all cloud-based deployments but are different for on-premises deployments. maprcli Prompt for an On-Premises Fabric To run maprcli commands, use an ssh connection to any node in the fabric. maprcli Prompt for an AWS, Azure, or GCP Fabric See SSH Access to a Cloud-Based Fabric. (Topic last modified: 2023-11-03)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/maprcli_prompt.html", + "title": "Displaying a maprcli Prompt" + },
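For the on-premises case, the maprcli prompt is simply a shell on a fabric node; a trivial sketch with a hypothetical hostname:

```
# Run a maprcli command over SSH on any on-premises fabric node (hostname is hypothetical).
ssh root@mynode.mycompany.net 'maprcli node list -columns svc,ip'
```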
+ { + "content": "\nSSO Using Keycloak Describes how single sign-on (SSO) is implemented by using Keycloak. Keycloak Is Preinstalled and Preconfigured Keycloak is the identity and access management (IAM) solution that provides single sign-on (SSO) support for the Data Fabric. Starting with release 7.5.0, Keycloak is preinstalled and preconfigured whenever you create a new fabric. During fabric creation, Keycloak is installed on all the nodes in the fabric. However, the Keycloak server is started on only one node. If new fabrics are created from the first fabric, Keycloak is installed on all the new fabric nodes, but the primary Keycloak node continues to serve the new fabrics. At installation, Keycloak is preconfigured with users, groups, and roles that enable integration of Keycloak with the Data Fabric. The preconfigured items are: Users (1): admin. Any additional users must be created with uid and gid attributes, as described in Adding New Users to Keycloak. Groups (1): fabric-manager. Any additional groups must be created with the gidNumber attribute, as described in Adding a Group to Keycloak. Roles (3): fabric-manager, infrastructure-admin, developer. These are the only supported roles; the developer role is sometimes referred to as the \"fabric user\" role. Clients (1): edf-client. This is the dedicated client for the Data Fabric. In Keycloak, a client is an application or service that can request authentication for a user. Keycloak installation also gives you access to the Keycloak admin portal.
Accessing the Keycloak Administration Console Describes how to start the Keycloak administration console so you can manage Keycloak and your SSO users. Changing the Keycloak admin Password Describes how to change the default Keycloak admin password to prevent unauthorized access to Keycloak and your Data Fabric user information. Adding New Users to Keycloak Describes how to add new users in Keycloak so you can use them to sign in to the Data Fabric UI. Adding a Group to Keycloak Describes how to add a Keycloak user group. Integrating Your LDAP Directory with Keycloak Keycloak can interface with an external LDAP directory so that LDAP users can access the Data Fabric UI. Completing SSO Setup Using the Data Fabric UI Describes how to configure the HPE Ezmeral Data Fabric to work with your SSO server. Resetting the SSO Configuration Describes how to update your single sign-on (SSO) configuration information using the Data Fabric UI. Identifying All CLDB Nodes Explains how you can identify all the CLDB nodes in an HPE Ezmeral Data Fabric. (Topic last modified: 2023-10-30)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/sso_configuration.html", + "title": "SSO Using Keycloak" + },
+ { + "content": "\nAccessing the Keycloak Administration Console Describes how to start the Keycloak administration console so you can manage Keycloak and your SSO users. If a new fabric has been created, you can access the Keycloak administration console by using these steps: In a browser, append port 6443 to the URL for your first fabric. This is the URL provided by the seed node procedure following successful fabric creation. For example: https://:6443 Click Administration Console. The Sign In page is displayed. Sign in using the default credentials: Username: admin Password: p@ssw0rd IMPORTANT: HPE recommends that you change the password for the admin user soon after sign in. See Changing the Keycloak admin Password. (Topic last modified: 2023-11-27)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/accessing_the_keycloak_admin_portal.html", + "title": "Accessing the Keycloak Administration Console" + },
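Sign-in can also be exercised from the command line. The sketch below requests an admin token from the bundled Keycloak through the standard OpenID Connect token endpoint and the built-in admin-cli client; the fabric host is a placeholder, the credentials are the defaults listed above, and, depending on the bundled Keycloak version, the path may need an /auth prefix.

```
#!/bin/bash
# Request an access token for the Keycloak admin user (master realm, admin-cli client).
KC="https://<fabric-host>:6443"   # placeholder: your first-fabric URL plus port 6443
curl -sk -X POST "$KC/realms/master/protocol/openid-connect/token" \
  -d grant_type=password -d client_id=admin-cli \
  -d username=admin -d 'password=p@ssw0rd'
```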
+ { + "content": "\nChanging the Keycloak admin Password Describes how to change the default Keycloak admin password to prevent unauthorized access to Keycloak and your Data Fabric user information. The default admin password provided in the bundled version of Keycloak is a well-known password that must be changed immediately after installation. Use these steps to change the password: Sign in to the Keycloak administration console as described in Accessing the Keycloak Administration Console. The master realm information is displayed. In the top right corner of the page, click the down arrow for the admin user, and select Manage account. The account management information is displayed. Click Personal Info. The Personal Info page is displayed. In the left navigation pane, under Account security, click Signing in. The Signing in page is displayed. On the Signing In page, click Update. Keycloak asks you to re-authenticate; type the default admin password again. The Update password page is displayed. Enter your new credentials, and click Submit. Click Back to security admin console to return to the administration console. In the top right corner of the page, click the down arrow for the admin user, and select Sign out. Sign in to the Keycloak administration console again, this time as the admin user with your new password. (Topic last modified: 2023-11-01)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/changing_the_keycloak_admin_password.html", + "title": "Changing the Keycloak admin Password" + },
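In scripted environments, the same change can likely be made with Keycloak's bundled admin CLI; a sketch, assuming kcadm.sh is present on the Keycloak node (the install path and new password are hypothetical):

```
#!/bin/bash
# Authenticate the CLI once, then set a new password for the admin user.
KCADM=/opt/keycloak/bin/kcadm.sh               # hypothetical install path
$KCADM config credentials --server "https://<fabric-host>:6443" \
  --realm master --user admin --password 'p@ssw0rd'
$KCADM set-password -r master --username admin --new-password 'NewS3cret!'
```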
+ { + "content": "\nAdding New Users to Keycloak Describes how to add new users in Keycloak so you can use them to sign in to the Data Fabric UI. By default, the Keycloak software provided with release 7.5.0 and later is preconfigured with only one user (the admin user). To add new users: Sign in to the Keycloak administration console as described in Accessing the Keycloak Administration Console. The master realm information is displayed. In the left navigation pane, click Users. The Users page is displayed, showing the preconfigured admin user. Click Add user. The Create user page is displayed. In the Username* field, type the name of a new user, and click Create. The User details page for the new user is displayed. Click the Attributes tab. The Attributes page is displayed. Enter uid and gid values for the new user: In the Key field, type uid, then specify a uid value, such as 12345, in the Value field. Click Add an attribute. In the second Key field, type gid, then specify a gid value, such as 12345, in the Value field. Click Save. Click the Credentials tab. The Credentials page shows No credentials. Click Set password. The Set password dialog box is displayed. Enter a password for the new user, and confirm the password. Move the Temporary slider to the Off position. Click Save. The Set password? confirmation dialog box is displayed. On the Set password? confirmation screen, click Save password. The Credentials tab of the User details page is displayed. Click the Role mapping tab. The Role mapping details are displayed. Click the ellipsis (...) for the default-roles-master role, and select Unassign. The Remove mapping? dialog box is displayed. Click Remove. The Role mapping details page shows No roles for this user. Click Assign role. The Assign roles dialog box is displayed. In the Name column, click one of the preconfigured roles to assign it to the new user. Click Assign. Next, you must assign the user to a group; every user must belong to at least one group. To add the user to a group, click the Groups tab. (To add a new group, see Adding a Group to Keycloak.) Click Join Group. The Join groups for user page is displayed. Click the check box for a group. Click Join. The Groups page is displayed. In the top right corner of the page, click the down arrow for the admin user, and select Sign out. You can now sign in to the Data Fabric UI using the new user. (Topic last modified: 2023-10-30)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/adding_new_users_to_keycloak.html", + "title": "Adding New Users to Keycloak" + },
+ { + "content": "\nAdding a Group to Keycloak Describes how to add a Keycloak user group. By default, the Keycloak software provided with release 7.5.0 and later is preconfigured with only one user group (the fabric-manager group). To add a new group: Sign in to the Keycloak administration console as described in Accessing the Keycloak Administration Console. The master realm information is displayed. In the left navigation pane, click Groups. Click Create a group. Specify a name for the group, and click Create. The Groups page is displayed showing the new group. Click the link for the new group. Click the Attributes tab. In the Key field, type gidNumber, then specify a gidNumber value, such as 12345, in the Value field. Click Save. In the left navigation pane, click Users. From the list of users, click a user that you want to add to the new group. Click Join Group. Click the name of the group to which you want to add the user, and click Join. (Topic last modified: 2023-10-29)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/adding_group_to_keycloak.html", + "title": "Adding a Group to Keycloak" + },
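The two UI procedures above can likewise be scripted with kcadm.sh; a sketch, assuming the CLI session configured in the earlier sketch, with example names and the uid/gid and gidNumber attributes that the pages require:

```
#!/bin/bash
KCADM=/opt/keycloak/bin/kcadm.sh   # hypothetical install path

# Create a user with the required uid and gid attributes, set a password, assign a role.
$KCADM create users -r master -s username=jdoe -s enabled=true \
  -s 'attributes.uid=12345' -s 'attributes.gid=12345'
$KCADM set-password -r master --username jdoe --new-password 'UserS3cret!'
$KCADM add-roles -r master --uusername jdoe --rolename developer

# Create a group with the required gidNumber attribute.
$KCADM create groups -r master -s name=data-engineers -s 'attributes.gidNumber=["12345"]'
```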
+ { + "content": "\nIntegrating Your LDAP Directory with Keycloak Keycloak can interface with an external LDAP directory so that LDAP users can access the Data Fabric UI. To add an external LDAP provider in Keycloak: Sign in to the Data Fabric UI, and switch to the Fabric manager experience. Click Security administration. In the SSO setup card, click Configure LDAP.
The Data Fabric UI opens the Keycloak administration console to the screen where you can start the process of LDAP integration. Click Add Ldap providers. The Keycloak User federation page is displayed. Fill in the information for your LDAP provider. For field-specific information, click the online help icon for the field. For Keycloak documentation, see this page. (Topic last modified: 2024-01-02)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/integrating_your_ldap_directory_with_keycloak.html", + "title": "Integrating Your LDAP Directory with Keycloak" + }, + { + "content": "\nCompleting SSO Setup Using the Data Fabric UI Describes how to configure the HPE Ezmeral Data Fabric to work with your SSO server.
It is a best practice to complete the SSO setup task soon after your fabric is installed. Non-SSO users have limited capabilities in using the Data Fabric UI. New installations of release 7.5.0 or later do not need to complete the SSO setup using the Data Fabric UI, but these instructions might be needed if you import a customer-managed cluster. To complete the SSO setup task: Sign in to the Data Fabric UI, and switch to the Fabric manager experience. Click Security administration. On the SSO setup card, click Setup SSO. The SSO setup form is displayed. Specify the following parameters: Provider*: your SSO provider; currently, Keycloak is the only supported provider. Example: Keycloak. Provider URL*: the URL of the SSO provider server. Example: https://myserver.keycloak.com/oauth2/default. Client Secret*: the key that is used to authenticate a client with the Keycloak server. Example: _BfjlzbnnQNbNdprf0vnQDSyXcuzziMzyrbm0raB. Client ID*: an identifier that enables communication between the Data Fabric and the SSO provider. Example: 0oa8m2onb7CAohGdW5d8. Certificate: the self-signed certificate from the SSO provider. Drag and drop the certificate into the box in the SSO Setup form, or click Select File to navigate to the certificate file and select it. Click Create. The webserver restarts automatically to ensure that correct authentication is enforced. After submitting, wait at least 15 minutes for the SSO configuration to be propagated. Then sign in again with your SSO credentials. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the following maprcli commands, which are provided for general reference. For more information, see maprcli Commands in This Guide. cluster getssoconf cluster setssoconf (Topic last modified: 2023-12-12)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/sso_setup_using_dfui.html", + "title": "Completing SSO Setup Using the Data Fabric UI" + },
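Only the two command names are given on the page, so just the read side is sketched here; it verifies what the setup wizard stored:

```
# Verify the SSO configuration recorded by the setup wizard (run on a fabric node).
maprcli cluster getssoconf -json
```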
+ { + "content": "\nResetting the SSO Configuration Describes how to update your single sign-on (SSO) configuration information using the Data Fabric UI. You must have fabric manager permissions to update the SSO configuration. To view or change your SSO configuration, use these steps: Sign in to the Data Fabric UI, and switch to the Fabric manager experience. Click Administration. On the SSO setup card, view your current SSO configuration details, including: SSO provider Provider URL Client ID To update or correct the information, click Reset SSO Configuration. A confirmation dialog box indicates that the operation cannot be undone. Click Reset. (Topic last modified: 2023-11-06)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/resetting_sso_configuration.html", + "title": "Resetting the SSO Configuration" + },
+ { + "content": "\nIdentifying All CLDB Nodes Explains how you can identify all the CLDB nodes in an HPE Ezmeral Data Fabric. Some procedures in this guide require you to find all the container location database (CLDB) nodes in your fabric. CLDB Nodes in Cloud-Based Fabrics In AWS, Azure, and GCP deployments of the Data Fabric, ALL nodes are CLDB nodes because fabric creation configures every node as a CLDB node. CLDB Nodes in On-Premises Fabrics In an on-premises deployment, not all nodes are necessarily CLDB nodes. To identify if a node is a CLDB node, ssh into the node as root or the Data Fabric admin user, and issue the following command: maprcli node list -columns svc,ip A node is a CLDB node if the service list includes cldb as one of the services: $ maprcli node list -columns svc,ip\nhostname service ip\nmynode.mycompany.net s3server,historyserver,resourcemanager,fileserver,cldb,nfs,mastgateway,hoststats,apiserver 10.163.167.210 (Topic last modified: 2023-10-24)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/installation/identifying_all_cldb_nodes.html", + "title": "Identifying All CLDB Nodes" + },
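Building on the command and the column layout shown in the example output (hostname, service, ip), a one-liner sketch that prints only the CLDB nodes:

```
#!/bin/bash
# Print hostname and IP of every node whose service list contains cldb.
maprcli node list -columns svc,ip | awk 'NR > 1 && $2 ~ /(^|,)cldb(,|$)/ { print $1, $3 }'
```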
+ { + "content": "\nSetting Up Clients Summarizes the steps for enabling client communication with the HPE Ezmeral Data Fabric. Clients allow hosts to communicate with the HPE Ezmeral Data Fabric. Set up clients and install client libraries to allow applications to access services on the HPE Ezmeral Data Fabric. To run against the HPE Ezmeral Data Fabric platform, certain application types require a client and the following client libraries: HDFS API HBase API Installing Clients on a Linux Host Describes how to install the client on a Linux host. Installing Client Libraries Describes how to install the client libraries on a fabric to enable communication between your Linux hosts and the HPE Ezmeral Data Fabric. (Topic last modified: 2023-11-26)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/setting_up_clients.html", + "title": "Setting Up Clients" + },
+ { + "content": "\nInstalling Clients on a Linux Host Describes how to install the client on a Linux host. Basic Steps for Client Installation To install the Data Fabric client software on a Linux host that you want to communicate with the HPE Ezmeral Data Fabric: Complete the steps in Preparing to Install a Data Fabric Client later on this page. Use one of these procedures to install the client on the Linux host: Installing the Data Fabric Client on RHEL Installing the Data Fabric Client on SLES Installing the Data Fabric Client on Ubuntu Perform the steps in Installing Client Libraries to enable your fabric to communicate with the clients. Preparing to Install a Data Fabric Client Before you install the Data Fabric client, perform the following steps: Verify that the operating system on the machine where you plan to install the client is supported. For a list of operating systems that are compatible with the Data Fabric clients, see Operating System Support Matrix. Verify that the machine where you plan to install the client is not a fabric node. The Data Fabric client is intended for use on a computer that has no other Data Fabric server software installed. Configure repositories for the client. The client nodes also need to have the Data Fabric repositories configured in order to pull the client packages. See Setting up the Data Fabric Repository. Install the Data Fabric package key. The package key must be installed before you can install Data Fabric packages. To install the package key, issue the command appropriate for your Linux distribution. IMPORTANT: To access the Data Fabric internet repository, you must specify the email and token of an HPE Passport account. For more information, see Accessing the HPE Ezmeral Token-Authenticated Internet Repository. RHEL/Rocky/Oracle Enterprise Linux: wget --user= --password= -O /tmp/maprgpg.key -q https://package.ezmeral.hpe.com/releases/pub/maprgpg.key && rpm --import /tmp/maprgpg.key\nwget --user= --password= -O /tmp/hpeezdf.pub -q https://package.ezmeral.hpe.com/releases/pub/hpeezdf.pub && rpm --import /tmp/hpeezdf.pub && gpg --import /tmp/hpeezdf.pub Ubuntu: wget --user= --password= -O /tmp/maprgpg.key -q https://package.ezmeral.hpe.com/releases/pub/maprgpg.key && sudo apt-key add /tmp/maprgpg.key\nwget --user= --password= -O /tmp/gnugpg.key -q https://package.ezmeral.hpe.com/releases/pub/gnugpg.key && sudo apt-key add /tmp/gnugpg.key For SLES only, you do not have to install the package key because zypper allows package installation with or without the key. To install the client, obtain the Data Fabric packages for your operating system at https://package.ezmeral.hpe.com/ and complete the installation steps described in one of the subsequent topics.
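A sketch of the RHEL-family key installation with the elided credentials supplied via environment variables; HPE_EMAIL and HPE_TOKEN are stand-in names, not values from the page:

```
#!/bin/bash
# Install the Data Fabric package keys on RHEL/Rocky/Oracle Enterprise Linux.
wget --user="$HPE_EMAIL" --password="$HPE_TOKEN" -q -O /tmp/maprgpg.key \
  https://package.ezmeral.hpe.com/releases/pub/maprgpg.key && rpm --import /tmp/maprgpg.key
wget --user="$HPE_EMAIL" --password="$HPE_TOKEN" -q -O /tmp/hpeezdf.pub \
  https://package.ezmeral.hpe.com/releases/pub/hpeezdf.pub && rpm --import /tmp/hpeezdf.pub
```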
Installing the Data Fabric Client on RHEL This section describes how to install the Data Fabric client on Red Hat Enterprise Linux (RHEL). Installing the Data Fabric Client on SLES This section describes how to install the Data Fabric Client on SLES. Installing the Data Fabric Client on Ubuntu This section describes how to install the Data Fabric client on Ubuntu. Setting up the Data Fabric Repository This section describes how to make packages available through the HPE Ezmeral Data Fabric repository. (Topic last modified: 2024-02-02)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/client_library_installation.html", + "title": "Installing Clients on a Linux Host" + }, + { + "content": "\nInstalling the Data Fabric Client on RHEL This section describes how to install the Data Fabric client on Red Hat Enterprise Linux (RHEL). These steps assume that you have already installed the package key as described in Installing Clients on a Linux Host and set up a Data Fabric repository as described in Setting up the Data Fabric Repository. Remove any previous Data Fabric software.
You can use rpm -qa | grep mapr to get a list of installed Data Fabric packages, then type the packages separated by spaces after the rpm -e command: rpm -qa | grep mapr\nrpm -e mapr-fileserver mapr-core Install the client for your target architecture: yum install mapr-edf-clients Open the Data Fabric UI to complete the configuration, as described in Installing Client Libraries Step 6. (Topic last modified: 2024-01-25)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/installing_rhel_client.html", + "title": "Installing the Data Fabric Client on RHEL" + },
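A consolidated sketch of the RHEL procedure; xargs -r (skip when nothing matches) is an assumption beyond the page's literal commands, and the SLES and Ubuntu pages that follow use zypper and apt-get equivalents:

```
#!/bin/bash
# Remove any previously installed Data Fabric packages, then install the client (RHEL).
rpm -qa | grep mapr | xargs -r rpm -e
yum install -y mapr-edf-clients
```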
(Topic last modified: 2024-01-25)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/installing_rhel_client.html",
+    "title": "Installing the Data Fabric Client on RHEL"
+  },
+  {
+    "content": "\nInstalling the Data Fabric Client on SLES This section describes how to install the Data Fabric Client on SLES. Remove any previous Data Fabric software. You can use rpm -qa | grep mapr to get a list of installed Data Fabric packages: rpm -qa | grep mapr Then type the package names separated by spaces after the zypper rm command. For example: zypper rm mapr-fileserver mapr-core Run the following command to install the Data Fabric client: zypper install mapr-edf-clients Open the Data Fabric UI to complete the configuration, as described in Installing Client Libraries Step 6.
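NOTE: For illustration, assuming the same hypothetical packages are installed, the equivalent SLES sequence would be: rpm -qa | grep mapr\nzypper rm mapr-fileserver mapr-core\nzypper install mapr-edf-clients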
(Topic last modified: 2024-02-02)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/installing_sles_client.html",
+    "title": "Installing the Data Fabric Client on SLES"
+  },
+  {
+    "content": "\nInstalling the Data Fabric Client on Ubuntu This section describes how to install the Data Fabric client on Ubuntu. Remove any previous data-fabric client software. You can use dpkg --list | grep mapr to get a list of installed data-fabric packages. Then type the packages separated by spaces after the dpkg -r command. For example: dpkg -r mapr-core mapr-fileserver Update your Ubuntu repositories. For example: apt-get update Make sure the client is running JDK 11 or later: $ echo $JAVA_HOME\n/Library/Java/JavaVirtualMachines/jdk-11.0.1.jdk/Contents/Home\n$ /Library/Java/JavaVirtualMachines/jdk-11.0.1.jdk/Contents/Home/bin/java -version\nopenjdk version \"11.0.1\" 2018-10-16\nOpenJDK Runtime Environment 18.9 (build 11.0.1+13)\nOpenJDK 64-Bit Server VM 18.9 (build 11.0.1+13, mixed mode) Run the following command to install the data-fabric client: apt-get install mapr-edf-clients Open the Data Fabric UI to complete the configuration, as described in Installing Client Libraries Step 6.
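NOTE: For illustration, a complete Ubuntu session might look like the following (hypothetical package names; on a typical Ubuntu host JAVA_HOME usually points at a path such as /usr/lib/jvm/java-11-openjdk-amd64 rather than the macOS-style path shown in the example above): dpkg --list | grep mapr\ndpkg -r mapr-core mapr-fileserver\napt-get update\njava -version\napt-get install mapr-edf-clients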
(Topic last modified: 2023-11-08)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/installing_ubuntu_client.html",
+    "title": "Installing the Data Fabric Client on Ubuntu"
+  },
+  {
+    "content": "\nSetting up the Data Fabric Repository This section describes how to make packages available through the HPE Ezmeral Data Fabric repository. The HPE Ezmeral Data Fabric repository on the internet provides all of the packages required to install a Data Fabric cluster using native tools such as: yum on RHEL zypper on SLES apt-get on Ubuntu Installing from the internet repository is generally the easiest installation method, but requires the greatest amount of bandwidth. With this method, each node is connected to the internet to download the required packages. Set up repositories by completing the steps for your RHEL, SLES, or Ubuntu distribution. Adding the Data Fabric Repository on RHEL This section describes how to install the Data Fabric repository. Adding the Data Fabric Repository on SLES This section describes how to install the Data Fabric repository. Adding the Data Fabric Repository on Ubuntu This section describes how to install the Data Fabric repository. Accessing the HPE Ezmeral Token-Authenticated Internet Repository Describes special considerations for using the token-authenticated internet repository for Data Fabric software and the ecosystem components. (Topic last modified: 2023-10-25)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/using_data_fabric_repos.html",
+    "title": "Setting up the Data Fabric Repository"
+  },
+  {
+    "content": "\nAdding the Data Fabric Repository on RHEL This section describes how to install the Data Fabric repository.
Procedure Change to the root user or use sudo. Create a text file called maprtech.repo in the /etc/yum.repos.d/ directory with the following content, replacing <version> with the version of data-fabric software that you want to install: IMPORTANT: To access the Data Fabric internet repository, you must specify the user name (email) and token of an HPE Passport account. For more information, see Accessing the HPE Ezmeral Token-Authenticated Internet Repository. [maprtech]\nname=HPE Ezmeral Data Fabric\nbaseurl=https://package.ezmeral.hpe.com/releases/v<version>/redhat/\nusername=<email>\npassword=<token>\nenabled=1\ngpgcheck=1\nprotect=1\n\n[maprecosystem]\nname=HPE Ezmeral Data Fabric\nbaseurl=https://package.ezmeral.hpe.com/releases/MEP/MEP-<version>/redhat\nusername=<email>\npassword=<token>\nenabled=1\ngpgcheck=1\nprotect=1 If your connection to the Internet is through a proxy server, you must set the http_proxy environment variable before installation: http_proxy=http://<host>:<port>\nexport http_proxy You should also set the value for the http_proxy environment variable by adding the following section to the /etc/yum.conf file: proxy=http://<host>:<port>\nproxy_username=<proxy_user>\nproxy_password=<proxy_password>
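NOTE: As a purely illustrative example (hypothetical account and release values), the [maprtech] section of a completed /etc/yum.repos.d/maprtech.repo for release 7.6.0 might read: [maprtech]\nname=HPE Ezmeral Data Fabric\nbaseurl=https://package.ezmeral.hpe.com/releases/v7.6.0/redhat/\nusername=jane.smith@company.com\npassword=<token>\nenabled=1\ngpgcheck=1\nprotect=1 The [maprecosystem] section follows the same pattern with the MEP version in its baseurl.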
(Topic last modified: 2023-12-03)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/adding_repo_rhel.html",
+    "title": "Adding the Data Fabric Repository on RHEL"
+  },
+  {
+    "content": "\nAdding the Data Fabric Repository on SLES This section describes how to install the Data Fabric repository. Procedure Change to the root user or use sudo. Use the following command to add the repository for Data Fabric packages, replacing <version> with the version of Data Fabric software that you want to install: IMPORTANT: For SLES distributions, if your user name is an email address that includes special characters \u2013 such as the @ symbol \u2013 you must URL encode the special characters so that the correct email address is passed to the authentication protocols in the repository. For most email addresses, changing the @ symbol to %40 is sufficient. For example: Unencoded email address: jane.smith@company.com URL encoded email address: jane.smith%40company.com To encode other special characters, see \"URL Encoded Emails\" at HPE Software Delivery Repository. zypper ar https://<email>:<token>@package.ezmeral.hpe.com/releases/v<version>/suse/ maprtech Use the following command to add the repository for ecosystem packages: zypper ar https://<email>:<token>@package.ezmeral.hpe.com/releases/MEP/MEP-<version>/suse/ maprecosystem
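NOTE: For example, with the hypothetical account jane.smith@company.com (URL encoded as jane.smith%40company.com) and release 7.6.0, the first command might read: zypper ar https://jane.smith%40company.com:<token>@package.ezmeral.hpe.com/releases/v7.6.0/suse/ maprtech The maprecosystem command follows the same pattern with the MEP version in its URL.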
If your connection to the Internet is through a proxy server, you must set the http_proxy environment variable before installation: http_proxy=http://<host>:<port>\nexport http_proxy Update the system package index by running the following command: zypper refresh Data-fabric packages require a compatibility package in order to install and run on SLES. Execute the following command to install the SLES compatibility package: zypper install mapr-compat-suse Installing sshpass About this task Before installing a cluster on a SLES image, you must run the following command to install sshpass: zypper --non-interactive -q --no-gpg-checks -p http://download.opensuse.org/distribution/leap/42.3/repo/oss/ install sshpass (Topic last modified: 2023-10-25)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/adding_repo_sles.html",
+    "title": "Adding the Data Fabric Repository on SLES"
+  },
+  {
+    "content": "\nAdding the Data Fabric Repository on Ubuntu This section describes how to install the Data Fabric repository.
Procedure Change to the root user or use sudo. Create the following file: # cat /etc/apt/auth.conf.d/package.ezmeral.hpe.com.conf\nmachine package.ezmeral.hpe.com\nlogin <email>\npassword <token> Add the following lines to /etc/apt/sources.list, replacing <version> with the version of data-fabric software that you want to install. IMPORTANT: To access the Data Fabric internet repository, you must specify the email and token of an HPE Passport account. For more information, see Accessing the HPE Ezmeral Token-Authenticated Internet Repository. deb https://package.ezmeral.hpe.com/releases/v<version>/ubuntu/ binary bionic\ndeb https://package.ezmeral.hpe.com/releases/MEP/MEP-<version>/ubuntu/ binary bionic Update the package indexes: apt-get update If your connection to the Internet is through a proxy server, add the following lines to /etc/apt/apt.conf: Acquire\n{\n  Retries \"0\";\n  HTTP\n  {\n    Proxy \"http://<user>:<password>@<host>:<port>\";\n  };\n};
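NOTE: To illustrate with hypothetical values, a completed /etc/apt/auth.conf.d/package.ezmeral.hpe.com.conf might read: machine package.ezmeral.hpe.com\nlogin jane.smith@company.com\npassword <token> and the corresponding sources.list entry for release 7.6.0 would be: deb https://package.ezmeral.hpe.com/releases/v7.6.0/ubuntu/ binary bionic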
(Topic last modified: 2023-10-25)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/adding_repo_ubuntu.html",
+    "title": "Adding the Data Fabric Repository on Ubuntu"
+  },
+  {
+    "content": "\nAccessing the HPE Ezmeral Token-Authenticated Internet Repository Describes special considerations for using the token-authenticated internet repository for Data Fabric software and the ecosystem components. Accessing the Token-Authenticated Repository Using a browser to access the new token-authenticated package repository requires you to supply the email address associated with your HPE account and a token. Use these steps: Navigate to the repository at https://package.ezmeral.hpe.com/. The authorization dialog box is displayed. In the Username field, paste the email address for your HPE Passport account. To obtain an HPE Passport Account, see Obtaining an HPE Passport Account. In the Password field, paste a token. To obtain a token, see Obtaining a Token. Click Sign in. Format for Passing an HPE User Name and Token to the Repository Any files or scripts that point to the new Data Fabric internet repository must include the email address and token associated with a valid HPE account expressed in the following format: https://<email>:<token>@package.ezmeral.hpe.com/ Examples for Accessing the Repository In examples that require you to run Linux commands that point to the repository, this guide shows the format that is needed for including the user name and password. For example, to use a wget command with the new repository, you must add the email address and token as follows: wget --user=jane.smith@company.com --password=<token> https://package.ezmeral.hpe.com/releases/installer/mapr-setup.sh -P /tmp Depending on the Linux distribution, other formats might be needed.
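NOTE: A curl equivalent of the wget example (curl is not part of the original steps; same hypothetical account) would be: curl -u jane.smith@company.com:<token> -o /tmp/mapr-setup.sh https://package.ezmeral.hpe.com/releases/installer/mapr-setup.sh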
Obtaining an HPE Passport Account An HPE Passport account is required to obtain support for Data Fabric products and gives you access to important HPE services. To obtain an HPE Passport account, visit the MY HPE SOFTWARE CENTER and click Sign In to create a new account. When you fill in information about your account, be sure to complete ALL of the fields (even fields that are not required). Leaving some fields blank can cause issues when you later try to access HPE repositories. Obtaining a Token A token associated with your HPE Passport account is required to obtain access to the HPE Ezmeral internet repositories. You can create a new token at any time by using the following steps. A token created in this way does not expire. The token remains valid even after you create a new token. To create a token for your HPE Passport account: Visit the HPE Support Center User Token page. Sign in if needed using your HPE Passport user ID and password. (Topic last modified: 2023-12-19)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/accessing_hpe_internet_repo.html",
+    "title": "Accessing the HPE Ezmeral Token-Authenticated Internet Repository"
+  },
+  {
+    "content": "\nInstalling Client Libraries Describes how to install the client libraries on a fabric to enable communication between your Linux hosts and the HPE Ezmeral Data Fabric. To install client libraries on a fabric: Install the client packages using the steps described in Installing Clients on a Linux Host. Sign in to the Data Fabric UI. Select the Fabric user view on the Home page.
On the Home page, click the ellipsis (...) in the Action column of the fabric for which you want to install the client libraries. Alternatively, navigate to the Fabric Details page, open the Actions dropdown menu, and select Client library. Click the Client library option. The Client library side drawer opens. Download the config.tar and the jwt_tokens.tar.gz files listed in the Client library side drawer. These files include information needed to set up the client libraries for your fabric. Run the command to extract the setup: tar xf config.tar --directory /opt/mapr Run the command to extract the JWT tokens: tar xf jwt_tokens.tar.gz --directory /root Run the commands to export the JWT tokens: export MAPR_JWT_TOKEN_LOCATION=\"/root/jwt_access\"\nexport MAPR_REFRESH_TOKEN_LOCATION=\"/root/jwt_refresh\" Run the command to configure your client libraries: /opt/mapr/server/configure.sh -R Run the command to test that your client libraries are set up correctly: hadoop fs -ls / If the client libraries are not set up correctly, the command returns an error message.
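NOTE: Taken together, the command-line portion of this procedure is a short shell session along these lines (assuming the two downloaded archives are in the current directory): tar xf config.tar --directory /opt/mapr\ntar xf jwt_tokens.tar.gz --directory /root\nexport MAPR_JWT_TOKEN_LOCATION=\"/root/jwt_access\"\nexport MAPR_REFRESH_TOKEN_LOCATION=\"/root/jwt_refresh\"\n/opt/mapr/server/configure.sh -R\nhadoop fs -ls / A successful listing of the fabric root confirms that the client libraries are working.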
(Topic last modified: 2024-02-05)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/clients/installing_client_libraries.html",
+    "title": "Installing Client Libraries"
+  },
+  {
+    "content": "\nUpgrade This section contains information that describes how to upgrade the HPE Ezmeral Data Fabric as-a-service platform. Upgrading a Data Fabric Describes how to upgrade your fabric and what to know before upgrading. Upgrade Fabric Parameters This page describes the configuration values that you need to specify to upgrade a data fabric. (Topic last modified: 2023-10-16)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/upgrade/upgrade_section.html",
+    "title": "Upgrade"
+  },
+  {
+    "content": "\nUpgrading a Data Fabric Describes how to upgrade your fabric and what to know before upgrading. Upgrading to a newer Data Fabric release is supported both for on-premises and cloud-based fabrics. Before Upgrading Here are some things you should know about the upgrade process: Upgrades do not happen automatically. The fabric manager must initiate an upgrade. It is a best practice to upgrade to the latest Data Fabric software as soon as you are prompted. Upgrades provide new features and fix defects that can affect your ability to use the Data Fabric. You can upgrade only to the latest currently supported Data Fabric version. For example, if your fabric is running release 7.4.0 and an upgrade to release 7.6.0 is available, you will have the option to upgrade to release 7.6.0 but not to release 7.5.0. For a list of supported releases, see Release History. The upgrade proceeds one node at a time as a \"rolling\" upgrade. This means that the Data Fabric UI is running on a node that eventually will get shut down and upgraded. At that point, the UI will become temporarily unusable. Upgrades initiated through the Data Fabric UI are supported only for fabrics that have a consumption-based license. A customer-managed Data Fabric with a term-based license cannot be upgraded in the same way. If your fabric has fewer than three nodes, you can expect the entire fabric to be unavailable for the client user during the upgrade.
If your fabric has three or more nodes, the fabric will still work during the upgrade, and all client operations will continue to work. However, the upgrade of the node hosting the Data Fabric UI will cause the UI to go offline intermittently until the node is upgraded. Special Considerations for Upgrading from Release 7.4.0 Note these special considerations for upgrading from release 7.4.0 to a later Data Fabric release: You must initiate the upgrade from the primary fabric in the global namespace. If you initiate the upgrade from a fabric that is not the primary, you might not be able to monitor the upgrade status while the upgrade is in progress. To identify the primary fabric, issue the following maprcli command: maprcli clustergroup getcgtable -showprimary true -json To access a maprcli prompt, see Displaying a maprcli Prompt. Because of a known issue (EZINDFAAS-581), upgrades from release 7.4.0 to 7.5.0 can fail because the keypair.pem file has the wrong permissions. This issue affects upgrades on AWS, Azure, and GCP, but does NOT affect upgrades for on-premises fabrics. To prevent the issue, you must change the file permissions BEFORE upgrading from release 7.4.0 to 7.5.0. Use the following steps: Find the installer node on your cloud fabric. The installer node has the <fabric-name>-keypair.pem file. When you create a fabric using the seed node deployment steps, the seed node displays the endpoint of the installer node. You can also identify the installer node because it is the node that contains the tmp/terraform_output.json file in the deployment directory. Use the steps in SSH Access to a Cloud-Based Fabric to ssh to the installer node and display a Linux prompt. Change to the directory containing the <fabric-name>-keypair.pem file. For example: cd /opt/mapr/installer/ezdfaas/deployments/<fabric-name>/infrastructure/terraform/[aws|azure|gcp] Use the chmod command to change the file permissions: chmod -R 0400 <fabric-name>-keypair.pem When you are ready, initiate the upgrade from release 7.4.0 to 7.5.0.
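NOTE: For instance, on an AWS-hosted fabric named df-fab1 (a hypothetical name), the permission fix would be: cd /opt/mapr/installer/ezdfaas/deployments/df-fab1/infrastructure/terraform/aws\nchmod -R 0400 df-fab1-keypair.pem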
Checking to See if an Upgrade Is Supported for Your Fabric The Data Fabric UI prompts you when an upgrade is supported for the current version of your software. If you are not sure that an upgrade is supported, you can check for supported upgrades by using the following steps: Sign in to the Data Fabric UI as a fabric manager. Click the Fabric manager experience. Click the Global namespace button. The UI displays the fabrics in your global namespace. See the Version column in the table. IMPORTANT: Version information for a fabric is displayed only if you have obtained a consumption-based license and added the activation key. If the Version column includes a prompt to upgrade to a newer release, an upgrade is supported for the fabric. How to Upgrade To begin the upgrade process: Click the Upgrade to <7.n.n> link. The Data Fabric UI displays the Upgrade fabric form. Fill in the information requested by the form. For more information, see Upgrade Fabric Parameters. Click Upgrade. The Upgrade fabric status dialog box is displayed. IMPORTANT: You can minimize the status dialog box, but do not close the dialog box until the upgrade is complete. If you close the dialog box, you will not be able to reopen it. When the upgrade is successful, the value in the Version column shows the new version (for example, 7.5.0). Troubleshooting Upgrade Issues If an upgrade fails, position your cursor over the failure message in the Data Fabric UI to obtain more information. Try to determine which node failed and the reason for the failure. In some cases, you can fix the issue manually. For example, if there is a repository or network issue, you might be able to resolve the issue on your own. You can then reinitiate the upgrade. To reinitiate the upgrade: In the global namespace list of resources, click the ellipsis (...) for the fabric to be upgraded. The available commands are displayed. Click Reinitiate. The Upgrade fabric form is displayed. Fill in any empty values in the Upgrade fabric form. Click Upgrade. If the failure cannot be resolved manually, contact HPE Support. More information Viewing the Software Version (Topic last modified: 2023-12-07)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/upgrade/upgrading_a_data_fabric.html",
+    "title": "Upgrading a Data Fabric"
+  },
+  {
+    "content": "\nUpgrade Fabric Parameters This page describes the configuration values that you need to specify to upgrade a data fabric. Parameters with an asterisk (*) are required. Before you can initiate the Upgrade fabric process, you must specify all required parameters. By default, the upgrade software knows the fabric name and the provider (cloud or on-premises).
For an on-premises upgrade, you must provide the SSH credentials used to create the fabric. Name* The name of the fabric. This field is typically grayed out because the Data Fabric UI prefills the Name information based on the fabric you selected for upgrading. Provider The fabric hosting information, which can be On-premises or one of several supported cloud services (AWS, Azure, or GCP). This field is typically grayed out because the Data Fabric UI prefills the Provider information based on the fabric you selected for upgrading. Username* The SSH user name, which is required for an on-premises upgrade. Password* The SSH password, which is required for an on-premises upgrade. (Topic last modified: 2023-10-17)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/upgrade/upgrade_fabric_params.html",
+    "title": "Upgrade Fabric Parameters"
+  },
+  {
+    "content": "\nUser Assistance Describes how to access different resources that can help you learn how to use the HPE Ezmeral Data Fabric. To make the most of the HPE Ezmeral Data Fabric, be sure to review all of these user-assistance resources: Resource Description To Access HTML-Based Documentation This guide, containing release notes, conceptual information, and step-by-step instructions. Using any browser, navigate to https://docs.ezmeral.hpe.com/datafabric/home/index.html. Guided Tours Two-minute interactive tours provided in the Data Fabric UI. The following tours are available: \"Get Started with HPE Ezmeral Data Fabric UI as a fabric manager\" \"Get Started with HPE Ezmeral Data Fabric UI as a fabric user\" Sign in to the Data Fabric UI. Either: Click the Guided Tour button in the lower left corner of the screen. If the Welcome screen appears, click Start Tour. In-Application Online Help Tool tips and help buttons that describe fields and screens. Hold your cursor over the button, or click the button. Videos Narrated product demonstrations. Product videos are currently in development. Check back frequently for updates to this page.
More information Other Resources (Topic last modified: 2023-07-23)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/overview/user_assistance.html",
+    "title": "User Assistance"
+  },
+  {
+    "content": "\nPlatform This section contains conceptual information that can help you to understand and use the HPE Ezmeral Data Fabric. IMPORTANT: To view platform information for the HPE Ezmeral Data Fabric \u2013 Customer Managed platform, see this website. Data Fabric UI Describes the graphical user interface for the HPE Ezmeral Data Fabric. Global Namespace (GNS) Describes the data plane that connects all of your HPE Ezmeral Data Fabric deployments. Single Sign-On (SSO) Support Describes how the HPE Ezmeral Data Fabric supports single sign-on (SSO). Iceberg Support Describes support for Iceberg in HPE Ezmeral Data Fabric 7.6.0. Fabric Resources Describes fabric resources. Data Storage Management Summarizes options that the HPE Ezmeral Data Fabric provides to give you access to your data. AWS Architecture Notes Describes architectural considerations for the HPE Ezmeral Data Fabric software-as-a-service (SaaS) platform as deployed on Amazon AWS. (Topic last modified: 2023-04-23)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/platform/platform_main.html",
+    "title": "Platform"
+  },
+  {
+    "content": "\nData Fabric UI Describes the graphical user interface for the HPE Ezmeral Data Fabric.
About the Data Fabric UI The Data Fabric UI is the browser-based, graphical user interface that you use to monitor and manage the HPE Ezmeral Data Fabric. The Data Fabric UI can give you access to all the fabrics in the global namespace. Depending on your user privileges, you can perform tasks such as the following (this is a partial list): Monitor system resources Monitor your billing and storage consumption Create or import fabrics Create volumes Create volume mirrors and snapshots Create buckets Create topics Manage users Control access to data The Home page provides capacity and system resource information. Switching to the Fabric manager view allows you to monitor and administer fabrics and resources. Launching the Data Fabric UI To launch the Data Fabric UI, navigate to the host that is running the WebServer in the fabric. Access to the fabric typically uses HTTPS on port 8443. For example: https://<hostname>:8443/app/dfui (Topic last modified: 2024-01-29)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/overview/data_fabric_ui.html",
+    "title": "Data Fabric UI"
+  },
+  {
+    "content": "\nGlobal Namespace (GNS) Describes the data plane that connects all of your HPE Ezmeral Data Fabric deployments.
Because enterprise data is scattered across multiple sources between edge, core, and multi-cloud, a mechanism is needed to enable access to the data seamlessly, irrespective of the data location. The global namespace is a solution that aggregates disparate and remote data sources and provides a namespace that encompasses all of your infrastructure and deployments. The global namespace maintains the native security model of the HPE Ezmeral Data Fabric, so that location details are abstracted from the application. Global namespace technology lets you manage globally deployed data as a single resource. Because of the global namespace, you can view and run multiple fabrics as a single, logical, and local fabric. The global namespace is designed to span multiple edge nodes, on-prem data centers, and clouds. In the global namespace, all fabrics can view all other fabrics. The Data Fabric UI shows the global namespace on the Home page in the Graph view. S3 Federation in Global Namespace Provides an overview of S3 federation in the global namespace. Network File System in Global Namespace Describes the federation of network file system in the global namespace. (Topic last modified: 2023-12-01)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/overview/global_namespace.html",
+    "title": "Global Namespace (GNS)"
+  },
+  {
+    "content": "\nS3 Federation in Global Namespace Provides an overview of S3 federation in the global namespace. Data Fabric provides a native implementation of the S3 object store for object management. Additionally, Data Fabric provides a mechanism to import data that is stored on S3-compliant object stores from vendors other than HPE, such as AWS, Scality, WEKA, and VAST, to name a few.
A third-party object store that is managed by vendors other than HPE is referred to as an external S3 object store. A global namespace facilitates the federation of objects from both the Data Fabric S3 object store and external S3 object stores via the Data Fabric UI. All buckets and objects from your S3 object stores can be accessed in the global namespace via the Data Fabric UI. You can access remote S3 object stores imported into your global namespace from any fabric in the same global namespace by obtaining the access points to the object stores. Prerequisites for S3 Federation: Object stores and fabrics in the global namespace must be able to communicate with one another over the network, and they must be part of the same SSO client and realm and the same LDAP server. See Working with External S3 Object Store for details on managing external S3 servers via the Data Fabric UI.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/overview/s3_federation_in_global_namespace_0.html",
+    "title": "S3 Federation in Global Namespace"
+  },
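As an illustration of what obtaining an access point enables, the sketch below lists the buckets visible through a federated S3 endpoint; the endpoint URL and the credentials are placeholders, not values from this documentation:

```python
# List buckets through a federated S3 access point in the global namespace.
# Endpoint and keys are hypothetical placeholders.
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="https://dfhost.example.com:9000",  # assumed access point
    aws_access_key_id="ACCESS_KEY_PLACEHOLDER",
    aws_secret_access_key="SECRET_KEY_PLACEHOLDER",
)

for bucket in s3.list_buckets()["Buckets"]:
    print(bucket["Name"])
```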
+  {
+    "content": "\nNetwork File System in Global Namespace: Describes the federation of network file systems in the global namespace. A network file server (NFS) is hosted on a remote network, typically in a different physical location. Remote network file servers provide file-sharing services using NFS, allowing clients from different networks or locations to access and share files over the network. HPE Ezmeral Data Fabric offers a native implementation of NFSv4. The global namespace has been extended to include the import of data from one or more NFSv4 systems offered by vendors other than HPE. An NFSv4 system that is offered by a vendor other than HPE is referred to as an external NFS with respect to Data Fabric. An imported external NFS appears as a part of the global namespace on the Data Fabric UI. The import of an external NFS facilitates the federation of all your NFS data, irrespective of the NFS vendor. See Working with an External NFS Server for details on importing an external file system into the global namespace via the Data Fabric UI.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/overview/external_network_file_system_in_global_namespace.html",
+    "title": "Network File System in Global Namespace"
+  },
+  {
+    "content": "\nSingle Sign-On (SSO) Support: Describes how the HPE Ezmeral Data Fabric supports single sign-on (SSO). Keycloak IAM Support: The HPE Ezmeral Data Fabric supports SSO when configured with the Keycloak identity and access management (IAM) solution. Other IAM solutions are not currently supported. Keycloak Is Preinstalled and Preconfigured: Starting with release 7.5.0, Keycloak is preinstalled and preconfigured whenever you create a new fabric. You can create new users and roles easily and quickly by using the Keycloak administration console. For more information, see SSO Using Keycloak. Limitation for Non-SSO Users: SSO users with sufficient credentials can view and manage resources on all fabrics. Non-SSO users can view and manage resources only on the fabric to which they are signed in, and cannot view or manage resources on other fabrics. The Data Fabric UI does not display these resources to non-SSO users because the UI cannot connect to other fabrics without the same login information.
",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/overview/sso_support.html",
+    "title": "Single Sign-On (SSO) Support"
+  },
+  {
+    "content": "\nIceberg Support: Describes support for Iceberg in HPE Ezmeral Data Fabric 7.6.0. Apache Iceberg: Apache Iceberg is an open-source table format that helps to simplify the data processing of huge data sets on a file system or object store. Iceberg brings the simplicity of SQL tables to huge data sets. Iceberg has the following capabilities: Iceberg tables are fast, safe, scalable, and integrate easily with analytics engines like Spark, PrestoDB, and Hive. Iceberg supports Atomicity, Consistency, Isolation, and Durability (ACID) transactions; you can use analytics engines like Spark, PrestoDB, Hive, and Impala to safely perform ACID transactions on the same table at the same time. Iceberg supports schema evolution, hidden partitioning, partition layout evolution, and time travel, which minimize unpleasant surprises. See the Apache Iceberg documentation for details. Data Fabric and Iceberg: Starting from Data Fabric 7.6.0, you can perform the following operations in the HPE Ezmeral Data Fabric Object Store: create a schema for Avro, ORC, or Parquet data types, and modify the schema if needed; create Iceberg tables using a specific schema and perform ACID transactions; create a snapshot of a table to check time travel; grant access permissions for an Iceberg table to different users; migrate data files into an Iceberg table, as well as the metadata; query an Iceberg table through Apache Spark; create an Iceberg table in an external S3 bucket and query it through the HPE Ezmeral Data Fabric Object Store. With these features, you can build a reliable and scalable Data Lakehouse architecture. More information: Getting Started with Iceberg.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/overview/iceberg_support.html",
+    "title": "Iceberg Support"
+  },
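A minimal PySpark sketch of a few of these operations (create a table, commit data, and inspect snapshots for time travel); the catalog name, catalog type, and warehouse path are assumptions, not the documented Data Fabric setup, and the iceberg-spark-runtime package must be on the Spark classpath:

```python
# Create an Iceberg table, insert rows, and list its snapshots.
from pyspark.sql import SparkSession

spark = (
    SparkSession.builder.appName("iceberg-sketch")
    .config("spark.sql.catalog.demo", "org.apache.iceberg.spark.SparkCatalog")
    .config("spark.sql.catalog.demo.type", "hadoop")
    .config("spark.sql.catalog.demo.warehouse", "/mapr/my.fabric/iceberg")  # hypothetical path
    .getOrCreate()
)

spark.sql("CREATE TABLE IF NOT EXISTS demo.db.events (id BIGINT, name STRING) USING iceberg")
spark.sql("INSERT INTO demo.db.events VALUES (1, 'created'), (2, 'offloaded')")

# Every commit produces a snapshot; time travel reads the table as of a
# given snapshot_id or timestamp.
spark.sql("SELECT snapshot_id, committed_at FROM demo.db.events.snapshots").show()
```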
+  {
+    "content": "\nFabric Resources: Describes fabric resources. What is a fabric resource? Fabric resources are the entities or resources that are associated with a fabric. Fabric resources are used to store organizational data, which is available in both structured and unstructured formats, as static and as streaming data. Different fabric resources can be used to store the organizational data, depending on the data format. Fabric Resource Types: The fabric resources supported by HPE Ezmeral Data Fabric are: Volumes, used to store static or structured data; Buckets, used to store large objects or data that is disparate in nature, such as audio files, video files, and images; Topics, used to store streaming or real-time data.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/platform/fabric_resources.html",
+    "title": "Fabric Resources"
+  },
+  {
+    "content": "\nVolumes: Brief conceptual information about volumes.
A volume is a logical unit that allows you to apply policies to a set of files, directories, and sub-volumes. You can use volumes to enforce disk usage limits, set replication levels, establish ownership and accountability, and measure the cost generated by different projects or departments. The volume structure defines how data is distributed across the nodes in your cluster. You can create a volume for each user, department, or project. You can mount volumes under other volumes to build a structure that reflects the needs of your organization. Sub-volumes are created by mounting a volume in a sub-directory of an already mounted volume. This establishes a parent-child relationship between the volumes, where the parent volume is mounted in a top-level directory and the child volume is mounted in a sub-directory. Create multiple small volumes with shallow paths at the top of a fabric's volume hierarchy to spread the load of access requests across the nodes. A well-structured volume hierarchy is an essential aspect of fabric performance: having an efficient volume hierarchy maximizes data availability as the data in a fabric grows, and fabric performance is negatively affected when a volume structure is not in place.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/platform/volumes.html",
+    "title": "Volumes"
+  },
+  {
+    "content": "\nBuckets: Describes buckets and the objects that they store.
Buckets are containers that store objects. Objects comprise disparate types of data, such as audio files, video files, and images. Object-based storage is the preferred method of storing and efficiently managing gigantic amounts of data. Underlying each Data Fabric bucket is a volume. Every bucket created in a Data Fabric user account is automatically associated with a volume.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/platform/buckets.html",
+    "title": "Buckets"
+  },
+  {
+    "content": "\nTopics: Describes topics that are relevant to streaming data. Topics are used to store streaming data. A topic can be thought of as a persistent message queue. The lifetime of a topic, or the time for which a topic must persist, is configurable. One or more client applications called producers publish streaming data in the form of messages to a topic. One or more client applications called consumers subscribe to the topics of their choice to consume the messages that producers publish to those topics. Multiple producers can publish messages to the same topic, and multiple consumers can subscribe to and consume messages from such a topic. Messages published to a topic are arranged or queued in the sequence of the publishing time.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/platform/topics.html",
+    "title": "Topics"
+  },
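Because Data Fabric topics speak the Apache Kafka wire protocol (see the Administering Topics material later in this corpus), the producer/consumer flow above can be sketched with a standard Kafka client; the broker address and topic name are placeholders:

```python
# Publish to and consume from a topic over the Kafka wire protocol.
# Broker address and topic name are hypothetical placeholders.
from kafka import KafkaConsumer, KafkaProducer

broker = "dfhost.example.com:9092"
topic = "sensor-readings"

producer = KafkaProducer(bootstrap_servers=broker)
producer.send(topic, b"temperature=21.5")
producer.flush()

# Messages arrive in publishing order, as described above.
consumer = KafkaConsumer(topic, bootstrap_servers=broker, auto_offset_reset="earliest")
for message in consumer:
    print(message.offset, message.value)
    break
```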
+  {
+    "content": "\nData Storage Management: Summarizes options that the HPE Ezmeral Data Fabric provides to give you access to your data. HPE Ezmeral Data Fabric facilitates efficient storage of data based on the frequency at which the data is accessed. Data Fabric provides rule-based automated tiering functionality that allows you to integrate seamlessly with: low-cost storage as an additional storage tier in the fabric for storing file data that is less frequently accessed (\"warm\" data) in an erasure-coded volume; third-party cloud object storage as an additional storage tier in the fabric to store file data that is rarely accessed or archived (\"cold\" data). In this way, valuable on-premise storage resources can be used for more active or hot file data and applications, while warm and/or cold file data can be retained at minimum cost for compliance, historical, or other business reasons. Data Fabric provides consistent and simplified access to and management of the data.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/platform/data_storage_management.html",
+    "title": "Data Storage Management"
+  },
+  {
+    "content": "\nData Tiering: Describes data tiering for efficient data access and data storage.
Data that is active and frequently accessed is considered hot data. Data that is rarely accessed is considered cold data. Hot data and cold data are identified based on the rules and policies set by the administrator. The mechanism used to store hot data is referred to as the hot tier (the data fabric cluster, that is, fabric storage or volumes), and the mechanism used to store cold data is referred to as the cold tier (a low-cost storage alternative on the cloud). Data starts off as hot when it is first written to local storage (on the data fabric cluster). It becomes cold based on the rules and policies the administrator configures. Data can be set up to be automatically offloaded, using the data fabric automated storage tiering (MAST) Gateway service, to the low-cost storage alternative on a third-party cloud object store (cold tier) such as S3.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/platform/data_tiering.html",
+    "title": "Data Tiering"
+  },
+  {
+    "content": "\nCold Tier: Describes a cold tier.
On the data fabric cluster, every cold tier (referred to as a remote target in the Data Fabric UI) has a bucket on a third-party cloud store where volume data is offloaded based on the policy configured by the administrator. Volume data in 64KB data chunks is packed into 8MB-sized objects and offloaded to the bucket on the tier, and the corresponding volume metadata is stored in a visible tier-volume as HPE Ezmeral Data Fabric Database tables on the data fabric cluster. During writes and reads, volume data is recalled to the data fabric cluster if necessary. Data written to the volume is periodically moved to the remote target, releasing the disk space on the filesystem. See Data Reads, Writes, and Recalls for more information. Data stored on the data fabric cluster requires three times the disk space of the regular volume on premium hardware due to replication (the default replication factor being 3). After offloading to the cloud, the space used by data (including data in the namespace container) in the volume on the data fabric cluster is freed, and only the metadata of the volume in the namespace container is 3-way replicated on the data fabric cluster. There is also a visible tier-volume on the data fabric cluster for storing the metadata associated with the volume. When you create a cold tier, the tier volume named mapr.internal.tier. is by default created in the /var/mapr/tier path. A directory for the volumes associated with the tier, identifiable by volumeid, is created under that path after the first offload of data from the volume to the tier. You can create one tier per volume, or create and associate multiple volumes with the same tier using the Data Fabric UI.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/platform/cold_tier.html",
+    "title": "Cold Tier"
+  },
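The 64 KB chunk / 8 MB object packing above implies 128 chunks per offloaded object; a quick back-of-the-envelope sketch:

```python
# Estimate how many 8 MB tier objects an offload produces, given the
# 64 KB chunk packing described above.
CHUNK_KB = 64
OBJECT_MB = 8
CHUNKS_PER_OBJECT = (OBJECT_MB * 1024) // CHUNK_KB  # 128

def objects_for(volume_gib: float) -> int:
    """Objects needed to hold volume_gib of offloaded volume data."""
    chunks = int(volume_gib * 1024 * 1024 // CHUNK_KB)
    return -(-chunks // CHUNKS_PER_OBJECT)  # ceiling division

print(CHUNKS_PER_OBJECT)   # 128 chunks per object
print(objects_for(1.0))    # a 1 GiB offload needs 128 objects
```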
+  {
+    "content": "\nData Read, Write, and Recall: Once offloaded to the storage tier, data is considered cold on the storage tier, but the data can still be accessed (read, written, and recalled). Read of Tiered Data: When the standard volume data is outside of the fabric storage and in the cloud (cold tiering), Data Fabric processes the request to read standard volume data and mirror volume data. Data Reads on a Tiering-enabled Standard Volume: When a client attempts to read, the read request is first sent to the volume on the fabric, and if the data exists in the volume on the fabric, the data is returned from the volume. If the data was offloaded to a storage tier, Data Fabric recalls the data from the cold tier to process the read request. Data Reads on a Tiering-enabled Mirror Volume: When a client attempts to read, the read request is first sent to the volume on the data-fabric cluster, and if the data exists in the volume on the cluster, the data is returned from the volume. If the data was offloaded, Data Fabric recalls or fetches a copy of the data (from the tier) into an associated cache-volume, from which the data is returned to the client. Write on Tiered Data: When writes happen, if the write is an append, new data is offloaded when the data meets the criteria in the rule (associated with the volume) for offload; if the write is a change to existing data (overwrite), the data is recalled to the data-fabric filesystem to allow the write to succeed, and is then offloaded when the data meets the criteria in the rule for offload. NOTE: If cold data is accessed (read/written) frequently, I/O to that file may suffer large latencies. In such scenarios, recall the whole volume or the corresponding files. Recall of Tiered Data: Offloaded data is automatically recalled when a client performs a read or overwrite on the data in the cold tier, or when a client performs an overwrite on the data in the warm tier. Data Fabric fetches a copy of the data to allow the operations to succeed.
",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/platform/data_read_write_and_recall.html",
+    "title": "Data Read, Write, and Recall"
+  },
+  {
+    "content": "\nData Storage Policy: Describes the use of data storage policy rules. The storage policy simplifies the lifecycle management of data in the volume, including automated migration of files to low-cost storage alternatives. The policy can contain rules for files that have a well-defined lifecycle, or for files you want to switch to different storage tiers during their lifecycle. You can specify the rules at the volume level to selectively identify files to offload (by criteria such as file size, file owner, and file modification time), the schedule for offloading the data (for example, 2 months after file modification), and the settings for storing (such as the location and credentials for the tier) and recalling the offloaded data. You can configure one rule per volume using the CLI or REST API, and you can associate a schedule to automatically offload data at scheduled intervals based on the associated rule. Data offload is driven by rules, which are configured per volume; a data offload rule can be based on the size of a file, the owner of the file, and/or the file modification timestamp. When a rule is associated with a volume, the rule is first applied to the files in the tiering-enabled volume; the offload is then also triggered for all files in the snapshot chain when the criteria in the rule are met. If a file does not exist in the tiering-enabled volume, the rule is applied to the latest state of the file in the snapshot chain. If the file exists in the tiering-enabled volume but has no latest state, or if the file was deleted in the tiering-enabled volume, offload does not happen.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/platform/data_storage_policy.html",
+    "title": "Data Storage Policy"
+  },
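To make the rule criteria concrete, here is a hypothetical offload predicate over file size, owner, and modification time; it illustrates the semantics only and is not the Data Fabric rule syntax:

```python
# Hypothetical offload predicate mirroring the rule criteria described
# above (size, owner, modification time). Not Data Fabric rule syntax.
import os
import time

def should_offload(path: str, min_bytes: int, owner_uid: int, min_age_days: float) -> bool:
    st = os.stat(path)
    age_days = (time.time() - st.st_mtime) / 86400
    # e.g., min_age_days=60 approximates "2 months after file modification"
    return st.st_size >= min_bytes and st.st_uid == owner_uid and age_days >= min_age_days
```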
+  {
+    "content": "\nAWS Architecture Notes: Describes architectural considerations for the HPE Ezmeral Data Fabric software-as-a-service (SaaS) platform as deployed on Amazon AWS. Deployment Topology: You can provision an HPE Ezmeral Data Fabric in Amazon AWS and in other public clouds to take advantage of the benefits of cloud computing. A single instance of the Data Fabric is referred to as a fabric. The fabric provides a high-performance file system for files, objects, and streaming files and can be deployed quickly and easily. The HPE Ezmeral Data Fabric is designed so that many fabrics deployed in different public clouds or on premises can communicate with each other seamlessly in a global namespace. Deployment Prerequisites: The user who deploys the Data Fabric on AWS must have PowerUserAccess and must provide information such as the fabric name, access key, secret key, region, virtual private cloud (VPC) ID, and public subnet ID. For more information, see AWS Fabric Configuration Parameters. Public and Private Subnets: To enable a global namespace consisting of many fabrics accessible over the internet, the user must provide a public subnet. The global namespace cannot currently be implemented with private subnets. The Data Fabric architecture does not prevent the use of private subnets, but some code changes are required before private subnets can be supported. Note that air-gapped, on-premises installations are fully supported. Regions and Availability Zones: The Data Fabric can be deployed into the following AWS regions: US East (Ohio), US East (N. Virginia),
US West (N. California), and US West (Oregon). In the current architecture, all fabric instances reside in a specific subnet, which is contained within a single availability zone (the default availability zone). Amazon Machine Images (AMIs): Users of the HPE Ezmeral Data Fabric do not need to create or manage the AMIs needed to support the Data Fabric on AWS. HPE provides a set of publicly available AMIs that facilitate installation and upgrade of the fabric without the need for user interaction. Security Groups: During fabric creation, a security group is created for each fabric. The security group is configured with predefined inbound and outbound rules to support the list of ports required for fabric-to-fabric communication. Instance, Disk, and Memory Information: See AWS Cloud Instance Specifications. Upgrades: When a new software version is available, the user is notified. At the user's discretion, the platform can perform a non-disruptive, rolling upgrade from one major software version to another. Scaling: Adding nodes to a fabric, or additional storage devices to a node, is not currently supported. Administrative Interface: The Data Fabric UI provides a browser-based graphical user interface for monitoring and managing all fabrics in a global namespace. SSO and Predefined Roles: The Data Fabric leverages the Keycloak identity and access management (IAM) solution to ensure that all the fabrics in a global namespace have access to the same user information. Keycloak can be used as a passthrough with other popular IAM solutions. SSO-configured fabrics support the following predefined roles: Infrastructure Admin, Fabric Manager, and Fabric User. For more information about the permissions granted to each role, see User and Role Management.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/overview/aws_architecture_notes.html",
+    "title": "AWS Architecture Notes"
+  },
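Since the deployment prerequisites above include a VPC ID and a public subnet ID, a pre-flight check with boto3 can confirm that they exist in the chosen region; the IDs, region, and ambient credentials are placeholders:

```python
# Pre-flight check that the VPC and public subnet named in the deployment
# prerequisites exist. All identifiers below are hypothetical placeholders.
import boto3

ec2 = boto3.client("ec2", region_name="us-east-2")  # e.g., US East (Ohio)

vpcs = ec2.describe_vpcs(VpcIds=["vpc-0123456789abcdef0"])
subnets = ec2.describe_subnets(SubnetIds=["subnet-0123456789abcdef0"])

print(vpcs["Vpcs"][0]["VpcId"])
# MapPublicIpOnLaunch is a reasonable proxy for a "public" subnet here.
print(subnets["Subnets"][0]["MapPublicIpOnLaunch"])
```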
+  {
+    "content": "\nAdministration: This section describes how to administer fabric resources in the global namespace of your HPE Ezmeral Data Fabric as-a-service platform. IMPORTANT: To administer the HPE Ezmeral Data Fabric \u2013 Customer Managed platform, see this website. Topics in this section: IPv6 Support in Data Fabric; Administering Fabrics; Administering Users and Roles; Administering Buckets; Administering Tables; Administering Topics (for the Apache Kafka wire protocol); Administering Volumes; Auditing Fabric and Fabric Data; Administering Security Policies; Working with an External NFS Server; Working with External S3 Object Store; Administering Alarms; Monitoring (with OpenTelemetry); Getting Started with Iceberg.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/administration_main.html",
+    "title": "Administration"
+  },
+  {
+    "content": "\nIPv6 Support in Data Fabric: Describes the IPv6 support feature for Data Fabric.
Data Fabric can be installed on hosts with IPv6 addresses; in other words, external endpoints for Data Fabric can have IPv6 addresses, and Data Fabric can communicate with clients over IPv6 addressing. Inter-cluster and intra-cluster traffic over IPv6 connections is supported with IPv4 compatibility. Data Fabric deployment over IPv6 addresses is possible when both the hardware hosting Data Fabric and the Data Fabric software are able to detect and support IPv6 addresses. The underlying hardware that hosts Data Fabric must have a network interface card (NIC) that supports IPv6 addressing. An application that wishes to communicate with Data Fabric over IPv6 can do so when Data Fabric is installed on IPv6-compatible hardware and IPv6 support is enabled on Data Fabric. The following terminology applies to IPv6 client/server nodes. IPv6-aware (hardware readiness): the NIC associated with a node that hosts Data Fabric is IPv6 compatible and can communicate with other nodes with IPv6 and IPv4 addresses. IPv6-unaware (hardware readiness): the NIC associated with a node that hosts Data Fabric cannot handle IPv6 traffic and can handle IPv4 traffic only. IPv6-enabled (software): IPv6 is enabled on the Data Fabric software; the node can communicate with both IPv6 and IPv4 addresses. IPv6-only (software): IPv6 is enabled on the Data Fabric software and the node communicates exclusively with IPv6 addresses; communication with IPv4 addresses is not supported on this node. The following matrix explains the communication between a client node and a Data Fabric node for various IP address type combinations.
| Type of application (type of client node) | IPv6-unaware server (IPv4-only server node) | IPv6-unaware server (IPv6-enabled server node) | IPv6-aware server (IPv6-only server node) | IPv6-aware server (IPv6-enabled server node) |
|---|---|---|---|---|
| IPv6-unaware client (IPv4-only node) | communication over IPv4 | communication over IPv4 | no communication | communication over IPv4 |
| IPv6-unaware client (IPv6-enabled node) | communication over IPv4 | communication over IPv4 | no communication | communication over IPv4 |
| IPv6-aware client (IPv6-only node) | no communication | no communication | communication over IPv6 | communication over IPv6 |
| IPv6-aware client (IPv6-enabled node) | communication over IPv4 | communication over IPv4 | communication over IPv6 | communication over IPv6 |
",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/working_with_ipv6_ip_addresses.html",
+    "title": "IPv6 Support in Data Fabric"
+  },
+  {
+    "content": "\nEnabling IPv6 on a fabric: Describes the procedure to enable IPv6 communication on a fabric. Prerequisites: You must have command-line access as the root user in order to enable IPv6 support on a fabric, and the hardware on which Data Fabric is installed must be IPv6 compatible; that is, the hardware must have an IPv6-compatible NIC. About this task: IPv6 is enabled by using the --ipv6-support or -6 option with configure.sh. For a fresh installation of Data Fabric, run configure.sh with --ipv6-support or -6 to enable IPv6 support on the fabric. For an existing installation, re-run the configure.sh script with the same option. NOTE: To enable inter-cluster communication over IPv6, IPv6 must be enabled on all communicating fabrics; to enable intra-cluster communication over IPv6, IPv6 must be enabled on all communicating nodes of the fabric. Use the following steps to enable IPv6 on one or more Data Fabric nodes. Procedure: 1. Log on as the root user to the command line of the primary node of the fabric on which you wish to enable IPv6. 2. Run one of the following commands to enable IPv6 on the specified fabric nodes: /opt/mapr/server/configure.sh ... --ipv6-support or /opt/mapr/server/configure.sh ... -6.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/enabling_ipv6_on_a_fabric.html",
+    "title": "Enabling IPv6 on a fabric"
+  },
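On the client side, whether a fabric node is reachable over IPv6 (per the matrix above) can be checked with a plain socket probe; the host and port are placeholders:

```python
# Check whether a fabric node resolves and connects over IPv6.
# Host and port are hypothetical placeholders.
import socket

host, port = "dfhost.example.com", 8443

# AF_INET6 restricts resolution to IPv6 addresses; a socket.gaierror or a
# connection failure suggests the node is not reachable over IPv6.
for family, socktype, proto, _, sockaddr in socket.getaddrinfo(
    host, port, socket.AF_INET6, socket.SOCK_STREAM
):
    with socket.socket(family, socktype, proto) as s:
        s.settimeout(5)
        s.connect(sockaddr)
        print("IPv6 connection established:", sockaddr)
        break
```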
+    ",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/enabling_ipv6_on_a_fabric.html",
+    "title": "Enabling IPv6 on a fabric"
+  },
+  {
+    "content": "Administering Fabrics: This section describes fabric operations that you can perform using the Data Fabric UI. A data fabric is a collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. Using the Data Fabric UI, you can create fabrics hosted by the following providers: AWS, Azure, GCP, and on-premises. This section covers:
+    - Configuring a Proxy Server for Data Fabric Access to the Internet: configure an https or http proxy for scenarios where communication between the internet and Data Fabric must happen over a proxy server.
+    - Creating a Fabric: create a new fabric for any of the supported fabric providers (AWS, Azure, GCP, and on-premises); if your organization has multiple departments or multiple use cases to support, you can create multiple fabrics.
+    - Importing a Fabric: import an as-a-service fabric into the global namespace.
+    - Viewing the Fabric Status: use the Global namespace card.
+    - Viewing Fabric Settings: view and change the fabric settings, which include fabric auditing, data auditing, and gateway information.
+    - Viewing the Fabric Endpoint: view the endpoint for a fabric on the Data Fabric UI.
+    - Viewing the Software Version: identify the core software version for a fabric.
+    - Generating S3 Access Keys for the Global Namespace: obtain an S3 user access key and secret key that can be used to perform operations on S3 resources anywhere in the global namespace.
+    - Setting a Quota for a User: set a quota for an individual user.
+    - Setting a Quota for a Group: set a quota for an individual group.
+    - Viewing Fabric-Related Metrics: the various fabric-related metrics visible on the Data Fabric UI.
+    - Setting Default Quotas for Users/Groups: set default values for user and group quotas on a fabric via the Data Fabric UI.
+    - Viewing the Fabric Service Status: view the status of the various services running on a fabric.
+    - View Capacity Usage by User on Fabric: check the capacity used by the internal volumes, buckets, topics, and binary tables created by the user that is logged in to the Data Fabric UI.
+    - SSH Access to a Cloud-Based Fabric: obtain a fabric-specific .pem file that enables SSH access to a cloud-based fabric.
+    - Deleting a Fabric: delete a remote fabric from the global namespace.
+    ",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/administering_fabrics.html",
+    "title": "Administering Fabrics"
+  },
+  {
+    "content": "Configuring a Proxy Server for Data Fabric Access to the Internet: Describes the procedure to configure an https or http proxy for scenarios where communication between the internet and Data Fabric must happen over a proxy server.
+    Prerequisites: You must be a fabric manager to be able to perform this task.
+    About this task: Depending on the network configuration, Data Fabric might need to connect to the internet via a proxy server, either to download Data Fabric packages or to communicate with an SSO server that is outside of the network in which Data Fabric is installed.
+    An https or http proxy server can serve as a common routing point for internet access from Data Fabric. It is recommended to use an https proxy for secure communication between Data Fabric and any server to be accessed over the internet. You may, optionally, specify an http proxy server for any communication over http. The format for specifying the proxy server is <host>:<port>. Some Data Fabric components are auto-restarted when you save the proxy settings, so you must refresh the Data Fabric UI about 15 to 20 minutes after saving them. NOTE: If you have signed in to the Data Fabric UI by using your single sign-on (SSO) credentials, your session remains active, and it should suffice to refresh the page in the web browser after the restart of Data Fabric components. If you are a non-SSO user, you must log off and log back in to the Data Fabric UI. Follow the steps given below to configure a proxy server.
+    Procedure:
+    1. Log on to the Data Fabric UI and switch to the Fabric manager experience.
+    2. Click Fabric Administration.
+    3. On the Fabric Settings card, click the Edit icon adjacent to Proxy configuration.
+    4. Enter the https proxy URL and port number in HTTPS proxy, in the <URL>:<port> format.
+    5. Optionally, enter the http proxy URL and port number in HTTP proxy, in the <URL>:<port> format.
+    6. Click Save.
+    Results: The proxy settings are saved. Some Data Fabric components restart, and this might result in an error for about 15 minutes after you save the proxy settings. Refresh the Data Fabric UI page that you are working on if this happens.
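+    For example (illustrative values only, not defaults from this documentation): an HTTPS proxy entry might be proxy.example.com:3128, and an HTTP proxy entry might be proxy.example.com:8080.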
+    ",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/configuring_proxy.html",
+    "title": "Configuring a Proxy Server for Data Fabric Access to the Internet"
+  },
+  {
+    "content": "Creating a Fabric: Fabrics make it possible for you to create volumes, buckets, and topics in a cloud or on-premises deployment. If your organization has multiple departments or multiple use cases to support, you can create multiple fabrics. This page describes the basic steps to create a new fabric for any of the supported fabric providers (AWS, Azure, GCP, and on-premises).
+    Before Creating a Fabric. Note these considerations:
+    - If you are creating your first fabric, see Fabric Deployment Using a Seed Node. You must use the seed node steps to create your first fabric; for all subsequent fabrics you can use the steps on this page.
+    - To create a fabric, you must have fabric manager credentials. The Create Fabric button is not displayed for developer and infrastructure admin credentials.
+    - Currently, only SSO users can create fabrics.
+    - You must obtain a license for, and register, each new fabric that you create.
+    - Always create fabrics one at a time. You cannot create multiple fabrics at the same time.
+    - Creating an on-premises fabric requires that you provide host nodes before starting fabric creation, and these nodes must meet certain prerequisites. Before creating an on-premises fabric, review Prerequisites for On-Premises Installation.
+    Steps for Creating a Fabric. Use the following steps to create a new fabric:
+    1. Log on to the Data Fabric UI with fabric manager credentials.
+    2. Click Create fabric. The Create fabric form appears.
+    3. Fill in the configuration parameters for the type of fabric you want to create (AWS, Azure, GCP, or on-premises fabric configuration parameters).
+    4. Click Create. To monitor the progress of fabric creation, check the status bar in the Fabric details dialog box, or click See details. Fabric creation can take 20 minutes or more. If fabric creation fails, you can retry the operation: click the ellipsis in the Action column, and select Reinitiate. If fabric creation continues to fail, and the failure cannot be resolved manually, contact HPE Support.
+    5. When the installation status shows Installed, click the ellipsis in the Action column, and select View endpoints. The URL for the new fabric is displayed, and you can copy the URL to the clipboard.
+    6. Add your fabric activation key. See Adding an Activation Key.
+    7. Register the fabric. See Registering a Fabric.
+    8. Set the billing model. See Setting the Billing Model.
+    ",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/create_fabric.html",
+    "title": "Creating a Fabric"
+  },
+  {
+    "content": "Importing a Fabric: This section provides the steps to import an as-a-service fabric into the global namespace. Subtopic: Importing an as-a-Service Fabric, which describes the steps to import an as-a-service fabric into the global namespace.
+    ",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/import_fabric.html",
+    "title": "Importing a Fabric"
+  },
+  {
+    "content": "Importing an as-a-Service Fabric: Describes the steps to import an as-a-service fabric into the global namespace.
+    Considerations for Importing an as-a-Service Fabric: An as-a-service fabric is a fabric that exists as part of a global namespace and was created using the Create fabric functionality of the Data Fabric UI. From any as-a-service fabric, you can import another as-a-service fabric into the global namespace by using the Import fabric command. A fabric can belong to only one global namespace at a time; thus, importing an as-a-service fabric necessarily removes the fabric from the global namespace to which it currently belongs. To view the current list of fabrics in your global namespace, display the Table view or Graph view on the Resources card of the Home page. Note these considerations:
+    - Only fabrics configured for SSO can be imported.
+    - To import a fabric, you must be an SSO user and have Fabric Manager or Fabric User credentials.
+    - You can import only one fabric at a time.
+    - You must have a consumption license for each new fabric that you import.
+    Preparing to Import an as-a-Service Fabric:
+    1. On the fabric that you plan to import, stop Keycloak. First identify the host running the mapr-keycloak service with maprcli node list -columns svc and, in the command output, look for a host that shows keycloak in the service column (if no host shows the mapr-keycloak service, go to step 2). Then stop the service: maprcli node services -name keycloak -action stop -nodes <node-name> -json
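+    As an illustrative sketch only (the node name is invented for the example): if maprcli node list -columns svc listed keycloak in the service column for node2.example.com, you would stop Keycloak with maprcli node services -name keycloak -action stop -nodes node2.example.com -json.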
+    2. On the fabric that you plan to import, reset the SSO information. Reset the SSO configuration with maprcli cluster resetssoconf -json, then restart the mapr-apiserver services on the fabric hosts: maprcli node services -name apiserver -action restart -nodes host1,host2 -json
+    3. Disable the pbs.master role for the fabric to be imported: maprcli config save -values {cldb.pbs.global.master:0} -json. If any security policies have been created on the fabric to be imported, they must be manually re-created on the importing fabric after the import operation is completed. To re-create the policies, refer to Administering Security Policies.
+    4. On the cluster to be imported, create a tar ball of the fabric directory under /opt/mapr/installer/ezdfaas/deployments/.
+    5. Copy the contents of the tar ball to the importing cluster's /opt/mapr/installer/ezdfaas/deployments directory. Extract the contents, and be sure to delete the .tar file.
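+    As an illustrative sketch only (the fabric directory name and importing host are invented; this documentation does not prescribe specific commands for these two steps):
+    tar -cf myfabric.tar -C /opt/mapr/installer/ezdfaas/deployments myfabric
+    scp myfabric.tar root@importing-host:/opt/mapr/installer/ezdfaas/deployments/
+    ssh root@importing-host 'cd /opt/mapr/installer/ezdfaas/deployments && tar -xf myfabric.tar && rm myfabric.tar'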
+    6. Obtain the SSO configuration from the importing fabric, and configure it on the fabric to be imported:
+       a. Fetch the SSO parameters from the importing fabric: maprcli cluster getssoconf -json. For example, the output looks like: { \"timestamp\": 1699432649586, \"timeofday\": \"2023-11-08 12:37:29.586 GMT-0800 AM\", \"status\": \"OK\", \"total\": 1, \"data\": [ { \"issuerendpoint\": \"https://<host>:6443/realms/master\", \"providername\": \"keycloak\", \"clientid\": \"edf-client\", \"clientsecret\": \"<client-secret>\" } ] }
+       b. Obtain the SSO certificate (the .crt file under /opt/mapr/keycloak/conf/) from the importing fabric, and use it to set the SSO configuration information for the fabric to be imported: maprcli cluster setssoconf -issuerendpoint \"https://<host>:8443/realms/TestRealm\" -providername keycloak -clientid edf-client -clientsecret <client-secret> -certfile <certificate-file> -json
+       c. Restart the mapr-apiserver services: maprcli node services -name apiserver -action restart -nodes host1,host2 -json
+       d. Wait for a minute to ensure that the SSO configuration is active, then try signing in to the UI at https://<host>:8443/app/dfui. You should be redirected to the Keycloak sign-in screen.
+    7. Use the Data Fabric UI to complete the import operation as described in the next section.
+    Completing the Import Operation by Using the Data Fabric UI. Use the following steps to complete the import operation:
+    1. Log on to the Data Fabric UI as a Fabric Manager.
+    2. Click Import fabric. The Import fabric menu appears.
+    3. Specify the current Name of the fabric to be imported. Do not change the name of the fabric to be imported.
+    4. Specify the Public IP address of the API server of the fabric to be imported.
+    5. Specify the port of the API server for the fabric to be imported.
+    6. Click Import.
+    7. Once the import operation is finished, ensure that the imported fabric is part of a cluster group and is listed as part of the global namespace.
+    8. Complete the following tasks for the new fabric: Registering a Fabric, Adding an Activation Key, and Setting the Billing Model.
+    ",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/import_as_a_service_fabric.html",
+    "title": "Importing an as-a-Service Fabric"
+  },
+  {
+    "content": "Viewing the Fabric Status: Describes how to use the Global namespace card.
+    About the Global namespace Card: The Global namespace card shows the current status and software version for each fabric and allows you to perform certain fabric-level actions (subject to your role). These actions can include: viewing fabric status and error information; viewing fabric access points; importing a fabric; creating a fabric; registering a fabric; adding an activation key; setting the billing model; importing an external S3 server; importing an external NFS server; upgrading fabric software (if a new software version is available); deleting a fabric; and reinitiating (retrying) an upgrade operation.
+    Viewing the Fabric Status. To view the fabric status:
+    1. Sign in to the Data Fabric UI, and switch to the Fabric manager view.
+    2. Click Global namespace.
+    3. Click the Table view icon to display the resource table with status values.
+    Fabric Status Information. Fabric status values include:
+    | Status | Description |
+    | --- | --- |
+    | Active | An activation key has been added as described in Adding an Activation Key. |
+    | Deleting | Fabric removal, as described in Deleting a Fabric, is currently in progress. |
+    | Expired | The activation key for the fabric is no longer valid. |
+    | Inactive | An activation key has not been added for the fabric. See Adding an Activation Key. |
+    | Install Failed | There was a problem during installation, and the fabric was not successfully installed. |
+    | Installed | Fabric installation completed successfully. |
+    | Installing | Fabric installation is currently in progress. |
+    | Upgrade Failed | There was a problem during the software upgrade, and the fabric was not successfully upgraded. |
+    | Upgrading | A software upgrade is currently in progress. |
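+    For example, a fabric that has not yet been given an activation key shows Inactive, becomes Active once the key is added, and changes to Expired if the key later stops being valid.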
+    ",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_fabric_status_gns.html",
+    "title": "Viewing the Fabric Status"
+  },
+  {
+    "content": "Viewing Fabric Settings: Describes how to view and change the fabric settings, which include fabric auditing, data auditing, and gateway information.
+    About the Fabric Settings. Fabric settings currently include:
+    | Setting | Default value | Description |
+    | --- | --- | --- |
+    | Fabric auditing | Off | Auditing of fabric-management operations and fabric administration. |
+    | Data auditing | Off | Auditing of data-access operations. |
+    | Gateway | N/A | The Gateway parameter is not currently supported. For table replication, generate a DNS record that specifies the location of the gateways in the fabric, which you can copy and paste into the zone file for your domain. Source fabrics can look up gateways during table replication using this record. Before generating the record, ensure that you have configured gateways in your cluster. |
+    | Proxy configuration | No proxy configured | Enables the configuration of an https or http proxy for environments that have a proxy server. |
+    Viewing the Fabric Settings. To view the Fabric Settings card:
+    1. Sign in to the Data Fabric UI, and switch to the Fabric manager view.
+    2. Click Fabric administration. The Fabric Settings card appears under the Quota by card.
+    Changing the Fabric Settings. To change fabric settings:
+    | To | Do this |
+    | --- | --- |
+    | Turn on fabric auditing | Click and drag the slider to the right. |
+    | Turn on data auditing | Click the Edit icon, select On, and click Update. |
+    | Copy the gateway information to the clipboard | Click the Copy icon. |
+    | Configure the proxy | See Configuring a Proxy Server for Data Fabric Access to the Internet. |
+    More information: Auditing Fabric and Fabric Data.
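+    For example, because both Fabric auditing and Data auditing default to Off, neither fabric-management operations nor data-access operations are recorded on a new fabric until you explicitly turn each setting on.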
+    ",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_fabric_settings.html",
+    "title": "Viewing Fabric Settings"
+  },
+  {
+    "content": "Viewing the Fabric Endpoint: Describes how to view the endpoint for a fabric on the Data Fabric UI.
+    Prerequisites: You must be a fabric manager to perform this operation.
+    About this task: You can view the endpoint for a fabric from the Data Fabric UI.
+    Procedure:
+    1. Log on to the Data Fabric UI.
+    2. Select the Fabric manager from the dropdown next to the welcome message on the Home page.
+    3. Click Global namespace.
+    4. On the table view, click the ellipsis under Action for the fabric whose endpoint you wish to view.
+    5. Click the View endpoint option.
+    Results: The fabric endpoint is displayed. The endpoint can be used to access the fabric and fabric resources. You can download the endpoint.
+    ",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_fabric_endpoint.html",
+    "title": "Viewing the Fabric Endpoint"
+  },
+  {
+    "content": "Viewing the Software Version: Describes several ways to identify the core software version for a fabric.
+    View the Software Version with User Information. To view the core software version: sign in to the Data Fabric UI, then, in the upper right corner of the home screen, click the down arrow next to the user name.
+    Viewing the Software Version with Fabric Details. To view the core software version:
+    1. Sign in to the Data Fabric UI, and switch to the Fabric manager view.
+    2. Click Global namespace.
+    3. Click the Table view icon to display the resource table with status values.
+    4. Click the fabric name to display the fabric details page. The core software version is displayed as the Build Version.
+    More information: Release History.
+    ",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_software_version.html",
+    "title": "Viewing the Software Version"
+  },
+  {
+    "content": "Generating S3 Access Keys for the Global Namespace: Describes how to obtain an S3 user access key and secret key that can be used to perform operations on S3 resources anywhere in the global namespace.
+ { + "content": "\nGenerating S3 Access Keys for the Global Namespace
 Describes how to obtain an S3 user access key and secret key that can be used to perform operations on S3 resources anywhere in the global namespace. As the fabric manager, you can generate and download a user access key and secret key. These keys facilitate command-line and programmatic (API) access to S3 resources for all fabrics in the namespace. For example, once you have generated the keys for the fabric manager, you can use MinIO client (mc) commands to set an alias for the fabric manager, and then use that alias to perform operations on all fabrics in the namespace. For information about the supported mc commands, see MinIO Client (mc) Commands. You can generate the keys only twice for the same global namespace; further attempts to generate keys result in an error message. Use these steps: Sign in to your local fabric as a fabric manager. The Global namespace screen appears. In the Global namespace card, click the Graph view. Click the icon for the global namespace. Click View access points. The Access points screen is displayed. Click the S3 servers tab. The screen displays the Access keys and S3 server details. Click Generate key. A confirmation dialog box asks if you want to generate a new S3 key. Click Generate key. A dialog box displays the access key and secret key information. Click Download JSON if you want to download the keys for the global namespace as a JSON file. Click Download if you want to download the S3 endpoints information as a JSON file.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/generating_s3_access_key.html", + "title": "Generating S3 Access Keys for the Global Namespace" + },
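Once generated, the keys can be exercised from any shell with the MinIO client installed. A hedged sketch follows; the alias name ezgns, the endpoint URL, and the key values are placeholders taken from the downloaded JSON, not values from the page above:

```
# Register the global-namespace S3 endpoint under a local alias.
mc alias set ezgns https://<s3-endpoint>:<port> <ACCESS_KEY> <SECRET_KEY>

# Supported mc commands can now target all fabrics through the alias,
# for example listing the buckets visible in the global namespace.
mc ls ezgns
```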
+ { + "content": "\nSetting a Quota for a User Set quota for an individual user. About this task: You can set quotas for individual users via the Data Fabric UI. The following quotas can be set: an advisory quota, which raises an alarm when the threshold is reached but does not prevent further writes, and a hard quota, which raises an alarm when the limit is reached and prevents further writes. Advisory and hard quotas can be expressed in megabytes (MB), gigabytes (GB), or terabytes (TB); GB is the default unit. The advisory quota must be less than the hard quota. Follow the steps below to set a quota for a user. Procedure: Log on to the Data Fabric UI. Select the Fabric manager option from the dropdown on the Home page. Click Fabric Administration. Select the users option in the dropdown for Quota by.
 Click Edit fabric quotas on the Quotas card. On the Edit Fabric Quotas dialog box, enter the values for user advisory quota, user hard quota, group advisory quota, and group hard quota. Change the unit as required. Specify the Fabric reserve limit. Click Update. Alternatively, you can edit the quota for an individual user from the Settings tab for a fabric. To get to the Settings tab, select the Fabric user on the Home page, click the table view icon on the Resources card, and click the fabric name in the table view of resources. Results: The specified quota is saved for the individual user. NOTE: The default quota specified for the fabric applies to all other users, unless a user-specific quota is set for them.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/set_quotas_for_individual_users.html", + "title": "Setting a Quota for a User" + },
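The quota semantics are the same whether set from the UI or from a node's command line. As a hedged illustration only, assuming the classic maprcli tool is available on a fabric node (the user name and sizes are placeholders):

```
# Set an advisory quota and a hard quota for a single user;
# -type 0 selects a user entity, -type 1 would select a group.
maprcli entity modify -name alice -type 0 -advisoryquota 8G -quota 10G

# Verify the stored quota values.
maprcli entity list -json
```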
+ { + "content": "\nSetting a Quota for a Group Set quota for an individual group. About this task: You can set quotas for individual groups via the Data Fabric UI. This quota overrides the default group quota set for the fabric. The following quotas can be set: an advisory quota, which raises an alarm when the threshold is reached but does not prevent further writes, and a hard quota, which raises an alarm when the limit is reached and prevents further writes. Advisory and hard quotas can be expressed in megabytes (MB), gigabytes (GB), or terabytes (TB); GB is the default unit. The advisory quota must be less than the hard quota. Follow the steps below to set a quota for a group. Procedure: Log on to the Data Fabric UI. Select the Fabric manager option from the dropdown on the Home page. Click Fabric Administration. Select the groups option in the dropdown for Quota by. Click Edit fabric quotas on the Quota by groups card. On the Edit Fabric Quotas dialog box, enter the values for the advisory quota and hard quota. Change the unit as required. Specify the Fabric reserve limit. Click Update. Alternatively, you can edit the quota for an individual group from the Settings tab for a fabric. To get to the Settings tab, select the Fabric user on the Home page, click the table view icon on the Resources card, and click the fabric name in the table view of resources. Results: The specified quota is saved for the individual group. NOTE: The default quota specified for the fabric applies to other groups, unless a group-specific quota is set for any other group on the fabric.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/setting_quota_for_a_group.html", + "title": "Setting a Quota for a Group" + }, + { + "content": "\nViewing Fabric-Related Metrics
 Explains the various fabric-related metrics visible on the Data Fabric UI. About this task: You can view various useful fabric-related metrics on the Data Fabric UI. The metrics include data related to billing and storage consumption by fabric, storage usage by the users of the fabrics monitored via the Data Fabric UI, and the top fabrics nearing their total storage capacity. See the following topics for details about the fabric-related metrics visible on the Home page of the Data Fabric UI: View Storage Consumption by User. View System Resource Utilization by Fabric. View Top Fabrics by Storage Capacity. View Billing Data by Fabric.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_fabric_related_metrics.html", + "title": "Viewing Fabric-Related Metrics" + }, + { + "content": "\nView Storage Consumption by User
 View fabric storage consumption by users. Prerequisites: You must be a fabric manager or an infrastructure admin to perform this operation, and users and/or groups must have been created. About this task: View how much storage is used by individual fabric users via the Data Fabric UI. You can determine the storage consumption trends by users on the fabrics that are monitored via the Data Fabric UI. Viewing storage consumption trends by groups is available if groups have been defined on the Data Fabric UI. The storage consumption of a user or group is the aggregated storage size of the volumes and topics owned by that user or group. NOTE: Bucket storage consumption is not included, because there is no concept of a bucket owner in Data Fabric. You can import a fabric to monitor its usage via the Data Fabric UI; see Importing a Fabric for information on importing fabrics. Procedure: Log on to the Data Fabric UI. Select Fabric manager from the dropdown next to the welcome message on the Home page. Click Fabric metrics on the Home page. Check the Storage use by Users card. Results: You can view a list of all fabric users and/or groups in order of storage utilization.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_storage_consumption_by_user.html", + "title": "View Storage Consumption by User" + }, + { + "content": "\nView System Resource Utilization by Fabric
 View system resource utilization for a fabric. Prerequisites: You must be a fabric manager, an infrastructure admin, or a fabric user to perform this operation. About this task: View a graphical representation of CPU and memory utilization by fabric for the selected time duration. Procedure: Log on to the Data Fabric UI. If you are a fabric manager or an infrastructure admin, check the Fabric utilization card to view the CPU and memory utilization by fabric. If you are a fabric user, scroll down the Home page to view the System Resources - CPU and memory utilization card. Select the fabric and the time duration for which you wish to view the CPU and memory utilization. Results: For an infrastructure admin or a fabric manager, the CPU and memory utilization appears on the Fabric utilization card. For a fabric user, the system utilization by the fabric during the selected time duration is shown in a graphical format.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_system_resource_utilization.html", + "title": "View System Resource Utilization by Fabric" + }, + { + "content": "\nView Top Fabrics by Storage Capacity
 View up to the top five fabrics by consumption of the available storage capacity. Prerequisites: You must be a fabric manager or an infrastructure admin to perform this operation. One or more fabrics must have been created on or imported into Data Fabric. About this task: View the fabrics that have consumed the most of the total storage available to them. You can use this data to understand which fabrics are nearing their total available storage capacity. Procedure: Log on to the Data Fabric UI. Click Fabric metrics on the Home page. Scroll down to see the Fabric Storage card.
 Results: You can see up to five fabrics, ranked by how much of their individually available storage capacity they have consumed. If you click the fabric area on the Fabric Storage card, you are taken to the Resources page, where you can view the fabric resources.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_top_fabrics_by_storage_capacity.html", + "title": "View Top Fabrics by Storage Capacity" + }, + { + "content": "\nView Billing Data by Fabric
 View estimated billing charges alongside storage consumption. About this task: If you set the billing model as part of installing the HPE Ezmeral Data Fabric, the Fabric Metrics page displays a Billing and Storage Consumption card. The card shows estimates of your storage consumption charges alongside your aggregated and on-demand storage consumption. The estimates are approximate and are not guaranteed to reflect your actual charges. In the bar chart, dark blue represents consumption below the commit amount; teal represents consumption above the commit amount. Only storage consumption information is displayed if you are logged on in a Developer role. If you log on as an Infrastructure Admin or Fabric Manager, both billing and storage consumption information are displayed. Procedure: Sign in to the Data Fabric UI with Fabric Manager credentials. Click Fabric metrics. Scroll down to see the Billing and Storage Consumption card. Results: You see the billing charges for the specified fabrics. If the charges are not displayed, it is likely that you have not yet set the billing model.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_billing_data_for_fabric.html", + "title": "View Billing Data by Fabric" + }, + { + "content": "\nSetting Default Quotas for Users/Groups
 Set default values for user and group quotas on a fabric via the Data Fabric UI. Prerequisites: You must be a fabric manager to edit user quotas and group quotas for a fabric. About this task: Quotas limit the disk space used by a volume or an entity such as a user or a group. A volume quota limits the space used by a volume; a user/group quota limits the space used by all volumes owned by a user or group. These quotas work on tenant volumes as well. You can set hard quota and advisory quota defaults for users and groups. When a user or group is created, the default quota and advisory quota apply unless overridden by specific quotas. Quotas can be specified in megabytes (MB), gigabytes (GB), terabytes (TB), petabytes (PB), exabytes (EB), and zettabytes (ZB). The user quota and group quota are the default advisory limits on the total space allocated to a user or group on the fabric; the user hard quota and group hard quota are the default hard limits, beyond which further writes are prevented. The fabric reserve limit is the percentage of the total cluster capacity to allocate for the volumes on the cluster. The size of a disk space quota is expressed in terms of the actual data stored, from the user's point of view: only post-compression data blocks are counted, and snapshot and replica space do not count against quotas. For example, a 10G file that is compressed to 8G and has a replication factor of 3 consumes 24G (3*8G) of raw disk, but charges only 8G to the user's or volume's quota. Follow the steps below to set default quotas for users and/or groups. Procedure: Log on to the Data Fabric UI. Select Fabric manager from the dropdown next to the welcome message on the Home page. In the tabular list of fabrics on the Global namespace card, click the fabric name for which you wish to set the default user and/or group quota. Click Settings. Click the pencil/edit icon next to the Default quota. On the Edit Fabric Quotas dialog box, enter the values for user quota, user hard quota, group quota, and group hard quota. Change the unit as required. Specify the Fabric reserve limit. Click Update. Results: The user quota, group quota, and fabric reserve limit specified for the fabric are saved.
 You can view the fabric default user quota and group quota on the Settings tab for the fabric. You may choose to specify quotas for individual users and/or groups; such values override the default user and group quotas for the respective users and/or groups. See Setting a Quota for a User for details.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/set_fabric_quotas.html", + "title": "Setting Default Quotas for Users/Groups" + },
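To make the quota-accounting rule concrete, the following sketch (illustrative only, reusing the numbers from the example above) computes raw disk consumption versus the quota charge:

```
# 10G file, compressed to 8G, replication factor 3:
# raw disk = compressed size * replication; quota charge = compressed size.
compressed_gb=8
replication=3
echo "raw disk used: $((compressed_gb * replication))G"  # 24G
echo "quota charged: ${compressed_gb}G"                  # 8G
```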
",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/set_fabric_quotas.html",
+ "title": "Setting Default Quotas for Users/Groups"
+ },
+ {
+ "content": "Viewing the Fabric Service Status View status of various services running on a fabric.

Prerequisites You must have permissions to access and view the fabric details.

About this task A fabric functions as a set of core services and monitoring services that run in the background. You can view the status of the services on the Data Fabric UI to determine if the services are running or have stopped. This can be useful in preliminary troubleshooting of fabric operation. Use the following steps to view fabric service status.

Procedure
1. Log on to the Data Fabric UI.
2. If you are a fabric user, click the Table View icon on the Resources card. If you are a fabric manager, select the Fabric manager option, click Global namespace, and check the table view.
3. Click the link for the fabric under the Resource Name column.
4. Navigate to the Services tab for the fabric.

Results The details about the various fabric-related services, along with the status of each service, are visible on the Services tab. (Topic last modified: 2023-11-01)
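Service state is also queryable from the command line on a fabric node. A hedged sketch; the node hostname is a placeholder:

```bash
# List the services configured on a node and whether each is running.
maprcli service list -node node1.myfabric.example.com
```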
",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_fabric_service_status.html",
+ "title": "Viewing the Fabric Service Status"
+ },
+ {
+ "content": "
View Capacity Usage by User on Fabric Describes how to check the capacity used by various internal volumes, buckets, topics, and binary tables created by the user that is logged in to the Data Fabric UI.

About this task When you are logged in to the Data Fabric UI, you can view the total capacity for a fabric that is used by the data stored on the volumes, buckets, topics, and binary tables created by you. NOTE: The capacity displayed on the My Capacity card is related to the storage resources on the selected fabric only. The capacity displayed on the Data Fabric UI excludes any resources such as volumes, buckets, topics, and binary tables that you might have created on an external NFS server or external S3 server that has been added to the global namespace.

Procedure
1. Log on to the Data Fabric UI.
2. Select Fabric user on the Home page.
3. Check the My Capacity card.
4. Select the fabric for which you wish to view the storage capacity used by the volumes, buckets, topics, and binary tables that you have created. Alternatively, select all fabrics if you wish to view the capacity usage of your fabric resources across all fabrics.

Results You can view the total capacity usage under Total, along with a categorized breakdown of the storage capacity used by the volumes, buckets, topics, and binary tables created by you on the selected fabric, or collectively for all fabrics if you have chosen to view the data for all fabrics. (Topic last modified: 2023-11-01)
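For a command-line cross-check of per-user consumption, the entity commands report disk usage for users and groups. A sketch under the same maprcli-access assumption; the username is hypothetical:

```bash
# Show the space consumed by all volumes owned by user alice (type 0 = user).
maprcli entity info -name alice -type 0

# Or list every user/group entity on the fabric with its disk usage.
maprcli entity list
```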
",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_capacity_utilized_by_user_on_fabric.html",
+ "title": "View Capacity Usage by User on Fabric"
+ },
+ {
+ "content": "SSH Access to a Cloud-Based Fabric Describes how to obtain a fabric-specific .pem file that enables SSH access to a cloud-based fabric.

Command-line access to cloud-based fabrics (AWS, Azure, or GCP) requires you to download a fabric-specific .pem file. The Data Fabric UI makes it easy to download the file. Note these considerations for downloading the .pem file:
- Only a user with the Fabric user or Fabric Manager role (or fc access) can use the Download SSH keys command.
- To connect to the fabric, you must provide the public IP address or public DNS name of any cloud fabric node. The public IP address or public DNS name is contained in the URL that you use to access the Data Fabric UI.
- The URL was provided when you performed the seed node installation to create your first fabric.
- SSH access to fabrics should only be used for troubleshooting operations under the supervision of HPE support personnel. SSH access should not be used for daily operations.

Use these steps to download the .pem file:
1. Sign in to the Data Fabric UI as a Fabric manager or Fabric user.
2. If you are a Fabric manager, switch to the Fabric user experience.
3. In the Resources card, click Table view.
4. Under the Action column, click the ellipsis.
5. Click Download SSH keys. The Data Fabric UI downloads the .pem file as <fabric-name>_key.pem.
6. If necessary, copy the file to the workstation that you will use to ssh to the fabric. Suppose you copy the file to /root/myfabric-keypair.pem. Remember the path to that location.
7. Reset the permissions on the downloaded .pem file to 0400: chmod 0400 <path-to-pem-file>
8. Use one of the following commands to connect to the fabric. AWS or GCP: ssh -i "<path-to-pem-file>" rocky@<public-IP-or-DNS>. Azure: ssh -i "<path-to-pem-file>" mapr@<public-IP-or-DNS>. (Topic last modified: 2024-01-16)
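Putting steps 7 and 8 together, a minimal shell sketch: the key path reuses the /root/myfabric-keypair.pem example above, while the node address is a hypothetical placeholder:

```bash
# Lock down the downloaded key; ssh refuses keys with permissive modes.
chmod 0400 /root/myfabric-keypair.pem

# Connect to an AWS or GCP fabric node (user rocky); use mapr@ for Azure.
ssh -i "/root/myfabric-keypair.pem" rocky@203.0.113.10
```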
",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/ssh_access_to_cloud_fabric.html",
+ "title": "SSH Access to a Cloud-Based Fabric"
+ },
+ {
+ "content": "Deleting a Fabric Delete a remote fabric from the global namespace.

Prerequisites You must be a fabric manager to delete a fabric. You must delete fabrics one at a time; you cannot delete multiple fabrics at the same time. To delete a remote fabric, you must have logged in with your single sign-on credentials.

About this task A local fabric is the fabric by which you are logged onto the Data Fabric UI. Any fabric, other than the local fabric, that has been added to the global namespace is a remote fabric. You can delete a remote fabric via the Data Fabric UI. You cannot delete a local fabric. If no remote fabrics are present, the Delete command is not available. CAUTION: When you delete a fabric, the fabric is uninstalled, and any running instances of the fabric are destroyed. The data on a fabric becomes inaccessible when you delete the fabric. Before you delete a fabric, ensure that you do not need the data on it, or that the required data is backed up.

Follow these steps to delete a fabric:

Procedure
1. Log on to the Data Fabric UI.
2. Select Fabric manager from the dropdown on the Home page.
3. Click Global namespace.
4. Click the ellipsis under Actions for the fabric to delete from the global namespace.
5. Click Delete.
6. Confirm the fabric deletion.

Results The fabric is uninstalled and removed from the global namespace.
(Topic last modified: 2024-01-22)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/deleting_a_fabric.html",
+ "title": "Deleting a Fabric"
+ },
+ {
+ "content": "Administering Users and Roles This section describes the operations you can perform related to users, groups, and roles for the HPE Ezmeral Data Fabric.

- User and Role Management This page describes the roles supported by the HPE Ezmeral Data Fabric as-a-service platform.
- Viewing a List of Users Describes how to display a searchable list of Keycloak users that includes the names of the users and their roles.
- Configuring Email Notifications Describes how to configure the Simple Mail Transfer Protocol (SMTP) to send email notifications from the Data Fabric UI to specified email accounts.
- Viewing and Editing Access Control Information Describes how to find and use the Access Control card that shows the access privileges for users and groups.
- Access Control Expression Syntax This topic explains access control expressions.

(Topic last modified: 2023-08-05)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/administering_users_and_roles.html",
+ "title": "Administering Users and Roles"
+ },
+ {
+ "content": "
User and Role Management This page describes the roles supported by the HPE Ezmeral Data Fabric as-a-service platform.

Roles and Permissions When SSO Is Configured

SSO-configured fabrics support the following roles:

| Role | Permissions | Corresponding ACL Permission Code (1) |
| --- | --- | --- |
| Infrastructure Admin | Permission to log in and start or stop services | login, ss |
| Fabric Manager | Full control of the fabric, create volume permission, and login permission (2) | login, cv, cp, fc |
| Fabric User | Login permission (2) and create volume permission | login, cv, cp |

(1) Shows the equivalent access control list (ACL) permission code for the HPE Ezmeral Data Fabric – Customer Managed cluster. For more information, see Security Policy Permissions and Creating Cluster-Level ACLs.
(2) The login user can log in to the Data Fabric UI and issue commands. Includes read access for existing objects.

Resource Actions Supported for the Roles

The following tables show the create, delete, and modify actions that each role can perform on various resources:

Fabric Manager
| Resource | Create | Delete | Modify |
| --- | --- | --- | --- |
| Fabric | Allow | Allow | Allow |
| Volumes | Allow | Allow | Allow |
| Buckets | Allow | Allow | Allow |
| Directories | Allow | Allow | Allow |
| User | Allow | Allow | Allow |
| Accounts | Allow | Allow | Allow |
| Groups | Allow | Allow | Allow |
| S3 Keys | Allow | Allow | Allow |
| Objects | Allow | Allow | Allow |
| Security Policies | Allow | Allow | Allow |
| Storage Policies | Allow | Allow | Allow |
| Storage Tiers / Remote Targets | Allow | Allow | Allow |
| SMTP Configuration | Allow | Allow | Allow |

Infrastructure Admin
| Resource | Create | Delete | Modify |
| --- | --- | --- | --- |
| Fabric | Deny | Deny | Allow |
| Volumes | Deny | Deny | Deny |
| Buckets | Deny | Deny | Deny |
| Directories | Deny | Deny | Deny |
| User | Deny | Deny | Deny |
| Accounts | Deny | Deny | Deny |
| Groups | Deny | Deny | Deny |
| S3 Keys | Allow | Allow | Allow |
| Objects | Deny | Deny | Deny |
| Security Policies | Deny | Deny | Deny |
| Storage Policies | Deny | Deny | Deny |
| Storage Tiers / Remote Targets | Deny | Deny | Deny |
| SMTP Configuration | Deny | Deny | Deny |

Fabric User
| Resource | Create | Delete | Modify |
| --- | --- | --- | --- |
| Fabric | Deny | Deny | Deny |
| Volumes | Allow | Allow | Allow |
| Buckets | Allow | Allow | Allow |
| Directories | Allow | Allow | Allow |
| S3 Keys | Allow | Allow | Allow |
| Objects | Allow | Allow | Allow |
| Security Policies | Allow | Allow | Allow |
| Storage Policies | Allow | Allow | Allow |
| Storage Tiers / Remote Targets | Allow | Allow | Allow |
| SMTP Configuration | Deny | Deny | Deny |

Displaying Role Information To display role information for the currently signed-in user: Sign in to the Data Fabric UI. In the upper right corner of the home screen, click the down arrow next to the user name.

Limitation for Non-SSO Users SSO users with sufficient credentials can view and manage resources on all fabrics. Non-SSO users can view and manage resources only on the fabric to which they are signed in. Non-SSO users cannot view or manage resources on other fabrics. The Data Fabric UI does not display these resources to non-SSO users because the UI cannot connect to other fabrics without the same login information.
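The ACL permission codes in the tables (login, ss, cv, cp, fc) map onto cluster-level ACLs on customer-managed deployments. As a hedged illustration only, assuming maprcli access; the username is a placeholder:

```bash
# Show which principals hold codes such as login, cv, or fc on the cluster ACL.
maprcli acl show -type cluster

# Grant a user full control (fc) on the cluster ACL.
maprcli acl edit -type cluster -user alice:fc
```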
(Topic last modified: 2024-02-04)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/user_and_role_management.html",
+ "title": "User and Role Management"
+ },
+ {
+ "content": "Viewing a List of Users Describes how to display a searchable list of Keycloak users that includes the names of the users and their roles.

The User management card can display only a small number of Keycloak users.
Clicking the View all button displays a searchable, configurable, full-page listing of Keycloak users who have access to the Data Fabric UI. If you are a fabric manager, you can also edit the roles for a user.

To display the full list of Keycloak users:
1. Sign in to the Data Fabric UI, and switch to the Fabric manager or Infrastructure admin experience.
2. Click Security administration.
3. Scroll down to the User management card.
4. Click View all. The list of Keycloak users is displayed.

(Topic last modified: 2024-01-15)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_all_users.html",
+ "title": "Viewing a List of Users"
+ },
+ {
+ "content": "
Configuring Email Notifications Describes how to configure the Simple Mail Transfer Protocol (SMTP) to send email notifications from the Data Fabric UI to specified email accounts.

The Data Fabric UI can notify you by email when alarms are generated on a fabric. To configure email notifications, you must set up SMTP.

Setting Up SMTP To set up SMTP:
1. Sign in to the Data Fabric UI, and switch to the Fabric manager experience.
2. Click Fabric administration.
3. On the SMTP card, click Edit SMTP settings. The Edit SMTP settings form is displayed.
4. Specify the following parameters:

| Parameter | Description | Example |
| --- | --- | --- |
| Provider* | Select Office 365, SMTP, or Other from the drop-down menu. If you select Office 365, the SMTP server and port information is pre-filled for you. NOTE: Gmail is provided as an option, but is not currently supported because Gmail does not support unsecure emails from third-party applications. | Office 365 |
| SMTP server* | The name of the mail server for the SMTP provider that you specified. | smtp.office365.com |
| This server requires an encrypted connection (SSL) | Check this box if the connection to the SMTP server must be encrypted. | N/A |
| SMTP port* | The SMTP port to use for sending mail. | 587 |
| Sender's full name* | The name that the HPE Ezmeral Data Fabric should use when sending email. | East Lab Data Fabric |
| Sender's email address* | The email address that the HPE Ezmeral Data Fabric should use when sending email. | jennifer-huang87@outlook.com |
| Sender's username (Optional) | The user name that the HPE Ezmeral Data Fabric should use when logging on to the SMTP server. | jennifer46 |
| Sender's SMTP password (Optional) | The password that the HPE Ezmeral Data Fabric should use when logging on to the SMTP server. | mySMTP!pw |

5. Click Save. A message indicates if the configuration was successful. See Setting Up Alarm Notifications.

Editing SMTP Settings After SMTP has been configured, you can edit the settings by clicking Edit SMTP settings, changing the parameter values as needed, and clicking Save.

Setting Up Alarm Notifications Setting up SMTP does not by itself enable alarm notifications. You must also identify the alarms for which you want to be notified. Currently, setting up alarms must be done using the maprcli command line. You must run the config save command for each alarm where you want to generate an email. WARNING: You must have fc (full control) or a (admin) permissions to run this command. The format of the command is: maprcli alarm config save -cluster <cluster> -values "<alarm>,<enableEmail>,<email>"

Assign values as follows:

| Value | Description | Example |
| --- | --- | --- |
| alarm | Name of the alarm. Specify the alarm name in uppercase with underscores. For a list of Data Fabric alarms, see Alarms Reference. | DISK_FAILURE_ALARM |
| enableEmail | Specifies whether individual alarm notifications are sent to any email address (including the default email address) for the alarm type: 0 – do not send notifications to any email address for the alarm type; 1 – send notifications to all email addresses for the alarm type. | 1 |
| email | One or more email addresses other than the default email address. If specified, alarm notifications are sent to these addresses as well, if enableEmail is set to 1. Multiple email addresses must be separated by spaces only. You cannot use commas or other delimiters. For example, user1@mycorp.com user2@mycorp.com is valid. | jennifer-huang87@outlook.com |
Example The following example command configures an email to be sent to test@example.com whenever the Node Alarm Core Present alarm is generated: maprcli alarm config save -values "NODE_ALARM_CORE_PRESENT,1,test@example.com"

More information Viewing Alarms (Topic last modified: 2023-09-08)
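To confirm what was saved, the alarm notification settings can be read back. A sketch, assuming the command runs on the target cluster:

```bash
# Persist the notification rule from the example above, then read the
# alarm configuration back to verify the email address was registered.
maprcli alarm config save -values "NODE_ALARM_CORE_PRESENT,1,test@example.com"
maprcli alarm config load
```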
",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/setting_up_smtp.html",
+ "title": "Configuring Email Notifications"
+ },
+ {
+ "content": "Viewing and Editing Access Control Information Describes how to find and use the Access Control card that shows the access privileges for users and groups.

Note the following prerequisites for viewing and changing access control information:
- You must have fabric manager permissions to view or change access control settings.
- The user or group for which you want to assign access must already be configured for the fabric. To add users or groups, see Adding New Users to Keycloak or Adding a Group to Keycloak.

Viewing the Access Control Card To view the Access control card:
1. Sign in to the Data Fabric UI, and switch to the Fabric manager view.
2. Click Fabric administration. The Access Control card appears under the Fabric administration details.

Changing Access Control Settings To add or change the access control information for a user or group:
1. On the Access control card, click Edit access. The Data Fabric UI displays the current settings.
2. Refer to the following table to change access settings for a user or group:

| To | Do this |
| --- | --- |
| Add a new user or group | Click +Add, specify the Type (User or Group), and select the desired access options. |
| Change the access for an existing user or group | Select or deselect the desired access options. |
| Remove a user or group | Click the garbage can icon. |

3. Click Save to save your changes, or click Close to exit without saving changes.

(Topic last modified: 2023-11-05)",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_and_editing_access_control_info.html",
+ "title": "Viewing and Editing Access Control Information"
+ },
+ {
+ "content": "
Access Control Expression Syntax This topic explains access control expressions.

An access control expression (ACE) is defined by a combination of user, group, or role definitions. You can combine these definitions using the following syntax:

| Operator | Description |
| --- | --- |
| u | Username or user ID, as they appear in /etc/passwd, of a specific user. Usage: u:<user> |
| g | Group name or group ID, as they appear in /etc/group, of a specific group. Usage: g:<group> |
| r | Name of a specific role. Usage: r:<role> |
| p | Public. Specifies that this operation is available to the public without restriction. Cannot be combined with any other operator. An API request or CLI command that saves such settings will return an error. |
| ! | Negation operator. Usage: !<expression> |
| & | AND operation. |
| \| | OR operation. |
| () | Delimiters for subexpressions. |
| "" | The empty string indicates that no user has the specified permission. |

An example definition is u:1001 | r:engineering, which restricts access to the user with ID 1001 or to any user with the role engineering. In this next example, members of the group admin are given access, and so are members of the group qa: g:admin | g:qa

Another example is to have a list of groups to which you want to give read permissions. To grant the read permission, you construct the following boolean expression: u:cfkane | (g:admin & !g:cl3) | (g:qa & (g:app2 | g:app3)) | (g:ba & g:dept_7a) | g:ds

This expression is made up of five subexpressions which are separated by OR operators:
- The first subexpression u:cfkane grants the read permission to the username cfkane.
- The subexpression (g:admin & !g:cl3) grants the read permission to the admins for all clusters except cluster cl3. The operator g is the group operator, and the value admin is the name of the group of all admins. The & operator limits the number of administrators who have read permission, because only those administrators who meet the additional condition will have it. The condition !g:cl3 is a limiting condition. The operator ! is the NOT operator. Combined with the group operator, this operator means that this group is excluded and does not receive the read permission. WARNING: Be careful when using the NOT operator. You might exclude fewer people than you intended.
For example, suppose that you do not want anyone in the group group_a to have access, so you define this ACE: !g:group_a. You might think that the data is now protected because members of group_a do not have access to it. However, you have not restricted access for anyone else except the members of group_a; the rest of the world can access the data. You should not define ACEs through exclusion by using the NOT operator. You should define them by inclusion, and use the NOT operator to further limit the access of the groups or roles that you have included. In the subexpression (g:admin & !g:cl3), the NOT operator limits the number of members within the admin group who have access: the admin group is included, and all users who are also part of the cl3 group are excluded.
- The subexpression (g:qa & (g:app2 | g:app3)) demonstrates use of a subexpression within a subexpression. The larger subexpression means that only members of group qa who are also members of group app2 or app3 have read access to the data. The smaller subexpression limits the number of people who have this permission in the qa group.

(Topic last modified: 2023-08-05)
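ACEs such as these are attached to resources through the platform tooling. A hedged sketch only, assuming maprcli access and a hypothetical volume named myvolume; -readAce here is the whole-volume data ACE flag used by the customer-managed maprcli volume commands:

```bash
# Restrict read access on a volume to cfkane, or to admins outside cl3.
maprcli volume modify -name myvolume -readAce 'u:cfkane | (g:admin & !g:cl3)'
```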
",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/access_control_expression_syntax.html",
+ "title": "Access Control Expression Syntax"
+ },
+ {
+ "content": "Administering Buckets Describes the operations you can perform related to buckets for the HPE Ezmeral Data Fabric.

Buckets are storage resources that store objects, which consist of data and its descriptive metadata. Object-based storage is the preferred method of storing and efficiently managing gigantic volumes of data. Data is stored efficiently in a flat address space called a storage pool, not in a tiered file structure. The address space is referenced by the metadata, which holds the information required to retrieve the data. The metadata facilitates deep analysis of the usage and function of the data that is stored in the storage pool. The access protocol used in object storage architecture is TCP/IP, and the communication medium is usually REST APIs.

Objects can comprise disparate types of unstructured data such as audio files, video files, and images. A user can store objects in the user's own account. Objects are stored inside containers called buckets. Every user can create buckets and set access policies to govern who can access the resources created by the user.

- Creating a Bucket Create a bucket on a fabric.
- Creating a Folder on a Bucket Create a folder on a bucket to store objects.
- Uploading Objects to a Bucket Upload one or more objects to a bucket.
- Downloading an Object from a Bucket Download an object from a bucket.
- Deleting an Object from a Bucket Delete an object from a bucket.
- Deleting a Folder from a Bucket Delete a folder from a bucket.
- Deleting a Bucket Delete a bucket from a fabric.

(Topic last modified: 2023-04-25)
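Because buckets are exposed over an S3-compatible REST API, generic S3 tooling can address them once pointed at a fabric endpoint. A hedged sketch using the MinIO client (mc); the alias name, endpoint URL, and credentials are placeholders, not values from this guide:

```bash
# Register the fabric S3 endpoint under a local alias for later mc commands.
mc alias set myfabric https://s3.myfabric.example.com:9000 MY_ACCESS_KEY MY_SECRET_KEY

# Sanity check: list the buckets visible to these credentials.
mc ls myfabric
```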
+ {
+ "content": "\nCreating a Bucket. Create a bucket on a fabric. Prerequisites: A fabric must be available for you to create a bucket. You must be a fabric user to create a bucket on the fabric. About this task: Buckets can be created on a fabric that exists on a public cloud provider. When you create a bucket, you must ensure that the bucket name is globally unique for your fabric, starts and ends with a lowercase letter or a number, is between 3 and 63 characters long, and contains lowercase characters only. You can enable locking of objects that are stored on the bucket. Object locking is useful when you wish to prevent overwriting of objects for a specific time duration. When you lock an object, multiple versions of the object can be stored; each object version is unalterable. Once object locking is enabled on an object, you cannot disable it for the object. You can specify the retention mode and retention period. If you wish to create a versioned bucket to store multiple object versions with an object lock, you must select the Enable Object Lock check box; the Object versioning check box is auto-selected when you enable object locking. You can also create a bucket that stores multiple object versions without enabling an object lock: select the Object versioning check box and leave the Enable Object Lock check box deselected. NOTE: Currently, a bucket can be created by a non-SSO user only. Procedure: 1. Log on to the Data Fabric UI. 2. Select Fabric user on the Home page. 3. Click Create Bucket on the Resources card. 4. Enter the Name. 5. Select the fabric on which you wish to create the bucket. 6. Enter the Account name that owns the bucket. 7. (Optional) Select the Enable Object Lock check box to enable object versioning and prevent deletion of objects. 8. (Optional) Select the Object versioning check box if you wish to enable object versioning but do not wish to prevent deletion of objects. 9. Click Create. Results: The bucket is created on the fabric. You can now upload objects to the bucket, and you can create one or more folders on the bucket to store objects. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference. For more information, see maprcli Commands in This Guide. mc mb",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/create_bucket.html",
+ "title": "Creating a Bucket"
+ },
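The mc mb command cited above is the MinIO-client-style way to script bucket creation against the fabric's S3 endpoint. A minimal sketch, assuming a configured mc client; the alias df and the bucket names are hypothetical:

```
# Create a bucket on the fabric's object store.
mc mb df/my-bucket

# Create a bucket with object locking enabled; as noted above,
# locking cannot be disabled once set on an object.
mc mb --with-lock df/my-locked-bucket
```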
+ {
+ "content": "\nCreating a Folder on a Bucket. Create a folder on a bucket to store objects. About this task: You can create folders on a bucket to segregate the objects stored on the bucket. You can create subfolders inside folders. Procedure: 1. Log on to the Data Fabric UI. 2. Select Fabric user from the dropdown on the Home page. 3. Click the Table view icon on the Resources card.
4. In the tabular list of fabrics, click the down arrow for the fabric that contains the bucket on which you wish to create the folder. 5. Click the bucket name seen under Resource Name. 6. Click Create Folder on the Objects tab. NOTE: If you are creating a subfolder, you must navigate into the folder where you wish to create the subfolder, and then click Create Folder. 7. Enter the Folder name. 8. Click Create. Results: The folder is created in the bucket. You can store objects in the folder.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/creating_folder_on_bucket.html",
+ "title": "Creating a Folder on a Bucket"
+ },
+ {
+ "content": "\nUploading Objects to a Bucket. Upload one or more objects to a bucket. Prerequisites: Your account must have the permission to upload objects to the bucket. The bucket to which you wish to upload objects must have enough space to store them.
About this task: You can upload one or more objects of size up to 1 GB to a bucket. If object versioning is enabled on the bucket, you can store multiple files with the same name on the bucket. Procedure: 1. Log on to the Data Fabric UI. 2. Select Fabric user from the dropdown on the Home page. 3. Click the Table view icon on the Resources card. 4. In the tabular list of fabrics, click the down arrow for the fabric that contains the bucket to which you wish to upload objects (files). 5. Click the bucket name seen under Resource Name. 6. Click Upload Objects on the Objects tab. 7. Click browse to select one or more files to upload, or drag and drop one or more files to the Upload files area. 8. If you wish to store the uploaded files as versions of the same object, enter the Destination file name; the files are then stored as multiple versions of the same object. 9. Click Upload. Results: The selected files are uploaded to the object store and appended to the list of objects for the fabric seen on the Data Fabric UI. If you have uploaded multiple files with the same name, that is, multiple versions of an object, the files appear with the same file name and a version number in parentheses.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/uploading_an_object_to_bucket.html",
+ "title": "Uploading Objects to a Bucket"
+ },
+ {
+ "content": "\nDownloading an Object from a Bucket. Download an object from a bucket.
Prerequisites: You must have the permission to download an object from a bucket. About this task: You can download objects that are stored on a bucket, one object at a time. Procedure: 1. Log on to the Data Fabric UI. 2. Select Fabric user on the Home page. 3. Click the Table view icon on the Resources card. 4. In the tabular list of fabrics, click the down arrow for the fabric that contains the bucket from which you wish to download the object (file). 5. Click the bucket name seen under Resource Name. 6. Click the ellipsis seen under Actions for the row of the object to download on the Objects tab. 7. Click the Download option. Results: The object is downloaded to the default download folder on your machine or to the folder that you select; the destination folder depends on your web browser settings.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/download_object_from_a_bucket.html",
+ "title": "Downloading an Object from a Bucket"
+ },
+ {
+ "content": "\nDeleting an Object from a Bucket. Delete an object from a bucket.
Prerequisites: You must have the permission to delete an object from a bucket. About this task: You can delete versioned and unversioned objects from a bucket. When you delete a versioned object, the object is merely marked for deletion and not actually deleted from the bucket. When you delete an unversioned object, the object is permanently removed from the bucket. Procedure: 1. Log on to the Data Fabric UI. 2. Click the Table view icon on the Resources card. 3. In the tabular list of fabrics, click the down arrow for the fabric that contains the bucket from which you wish to delete objects (files). 4. Click the bucket name seen under Resource Name. 5. Click the ellipsis seen under Actions for the row of the object to delete on the Objects tab. 6. Click the Delete option. 7. Click Delete on the message box that appears. Results: If the object is unversioned, it is permanently removed from the bucket. If the object is versioned, it is marked for deletion. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference. For more information, see maprcli Commands in This Guide. mc rm",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/delete_object_bucket.html",
+ "title": "Deleting an Object from a Bucket"
+ },
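Object deletion likewise maps to the mc rm command referenced above. A hedged example under the same assumptions (preconfigured alias df, hypothetical bucket and object names):

```
# Remove a single object. On a versioned bucket this adds a delete
# marker rather than erasing the data, matching the behavior above.
mc rm df/my-bucket/reports/2023.csv
```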
+ {
+ "content": "\nDeleting a Folder from a Bucket. Delete a folder from a bucket. Prerequisites: The folder to delete must not contain objects. You must have the permission to delete a folder from a bucket. About this task: You can delete a folder from a bucket when the folder does not contain any objects. Procedure: 1. Log on to the Data Fabric UI. 2. Click the Table view icon on the Resources card. 3. In the tabular list of fabrics, click the down arrow for the fabric that contains the bucket from which you wish to delete a folder. 4. Click the bucket name seen under Resource Name. 5. Click the ellipsis seen under Actions for the row of the folder to delete on the Objects tab. 6. Click the Delete option. 7. Click Delete on the message box that appears. Results: The folder is deleted from the bucket.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/deleting_a_folder_from_bucket.html",
+ "title": "Deleting a Folder from a Bucket"
+ },
+ {
+ "content": "\nDeleting a Bucket. Delete a bucket from a fabric.
Prerequisites: The bucket to delete must be empty. You must have the permission to delete a bucket. About this task: You can delete a bucket from a fabric. Procedure: 1. Log on to the Data Fabric UI. 2. Click the Table view icon on the Resources card. 3. In the tabular list of fabrics, click the down arrow for the fabric that contains the bucket that you wish to delete. 4. Locate the row for the bucket to delete in the Resources seen under the fabric. 5. Click the ellipsis seen under the Actions column for the bucket row. 6. Click the Delete option. 7. Click Delete on the message box that appears. Results: The bucket is deleted from the fabric. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference. For more information, see maprcli Commands in This Guide. mc rb",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/deleting_bucket.html",
+ "title": "Deleting a Bucket"
+ },
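Deleting a bucket corresponds to mc rb. A sketch under the same assumptions (alias df, hypothetical bucket name); as the prerequisite above states, the bucket must be empty:

```
# Remove an empty bucket from the fabric's object store.
mc rb df/my-bucket
```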
+ {
+ "content": "\nAdministering Tables. Describes the operations you can perform related to tables for HPE Ezmeral Data Fabric. Tables on HPE Ezmeral Data Fabric provide a native implementation of the HBase Table for improved scalability, stability, and speed on the Data Fabric platform. With HPE Ezmeral Data Fabric tables, you can: create and access tables in the Data Fabric UI; access tables with a global path; list and filter tables; create and manage access controls through the Data Fabric UI; and enable read optimization to improve the speed of large read workloads. Subtopics: Managing Tables; Managing Column Families and Columns; Viewing Table Information; Managing Table Replication; Administering Access Controls for Tables.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/administering_tables.html",
+ "title": "Administering Tables"
+ },
+ {
+ "content": "\nManaging Tables. The topics in this section describe managing tables. Subtopics: Creating a Table (creating a table) and Deleting a Table (deleting a table).",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/managing_tables.html",
+ "title": "Managing Tables"
+ },
+ {
+ "content": "\nCreating a Table. This topic describes creating a table. Prerequisites: A fabric must be available for you to create a table. You must have the permissions to create a table on the fabric. Procedure: 1. Log on to the Data Fabric UI. 2. Select Fabric user on the Home page. 3. Click Table on the Resources card; the Create Table side drawer opens. 4. Enter the table Name. The table Path field is filled automatically, but you can edit the Path if desired.
The table is automatically assigned a fabric-specific full path, which you can use to access the table from the global namespace. 5. Select the name of the Fabric on which you want to create the table from the dropdown menu. 6. Enter a Column family name for your table. During the table creation process, you can create only one column family. 7. Click Create. Results: The table is created on the fabric. After successful table creation, you can add additional column families to the table, as described in Creating a Column Family.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/creating_a_table.html",
+ "title": "Creating a Table"
+ },
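The table pages do not cite a CLI equivalent, but for scripted setups the classic maprcli table command can create the same resource. A hedged sketch with a hypothetical path:

```
# Create a table at a fabric path (path is hypothetical).
maprcli table create -path /my-fabric/my-table
```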
+ {
+ "content": "\nDeleting a Table. This topic describes deleting a table. Prerequisites: You must have the permissions to delete tables on the fabric. Procedure: 1. Log on to the Data Fabric UI. 2. Select Fabric user on the Home page. 3. Click the Table View icon on the Resources card. 4. Click the ellipsis in the Action column of the table you want to delete. 5. Click Delete. Results: The table is deleted from the fabric.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/deleting_a_table.html",
+ "title": "Deleting a Table"
+ },
+ {
+ "content": "\nManaging Column Families and Columns. The topics in this section describe managing column families and columns. Subtopics: Creating a Column Family; Configuring Column Family Permissions; Deleting a Column Family.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/managing_column_families_and_columns.html",
+ "title": "Managing Column Families and Columns"
+ },
+ {
+ "content": "\nCreating a Column Family. This topic describes creating a column family.
Prerequisites: A table must be available for you to create a column family. You must have the permissions to create column families on the table. Procedure: 1. Log on to the Data Fabric UI. 2. Select Fabric user on the Home page. 3. Click the Table View icon on the Resources card. 4. In the tabular list of fabrics, click the down arrow for the fabric that contains the table on which you want to create a column family. 5. Click the table name seen under Resource Name. 6. Click Add column family on the Column families tab; the Add column family wizard opens on the Column family details screen. 7. Enter the Column family name. 8. Enter the Max version and Min version for your column family. These are the maximum and minimum numbers of versions that are retained upon revision to the column family. 9. Click the toggle to enable Read optimization. Read optimization enables in-memory caching for read workloads, greatly improving the speed of read operations for large workloads. 10. (Optional) Create user access controls for your column family. The wizard automatically creates a default column family Access control entry; you can edit the default entry or delete it with the action button (trash can). Select the user Type, enter the user Name, and select the permissions you want the user to have: Read (read permissions on the column family), Write (write permissions on the column family), Append (append permissions on the column family), or Version (the user can roll the column family back to a previous version). 11. (Optional) Click Add to create another user access control for the column family. 12. Click Manage column permissions; the wizard opens the Manage column permissions window, from which you can create columns and column-specific access controls. 13. (Optional) To create a column in the column family, enter the column name. 14. (Optional) Create user access controls for your column. The wizard automatically creates a default column Access control entry; you can edit the default entry or delete it with the action button (trash can). Select the user Type, enter the user Name, and select the permissions you want the user to have: Read (the user can read the entries in the column), Write (the user can alter the existing column entries), or Append (the user can add new records to the column). 15. (Optional) Click Add to create another user column in the column family. 16. Click Apply. Results: The column family is created on the table with the defined access controls and columns.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/creating_a_column_family.html",
+ "title": "Creating a Column Family"
+ },
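Column families can likewise be added from the command line with maprcli; a sketch under the same caveats (hypothetical table path and column family name):

```
# Add a column family named cf1 to an existing table.
maprcli table cf create -path /my-fabric/my-table -cfname cf1
```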
+ {
+ "content": "\nConfiguring Column Family Permissions. This topic describes configuring column family permissions for a table. Prerequisites: A table with a column family must be available for you to configure column family permissions. You must have the permissions to configure column families on the table. About this task: Follow the steps given below to create or edit column family permissions on a table. Procedure: 1. Log on to the Data Fabric UI. 2. Select Fabric user on the Home page. 3. Click the Table View icon on the Resources card. 4. In the tabular list of fabrics, click the down arrow for the fabric that contains the table on which you want to create or edit column family permissions. 5. Click the table name seen under Resource Name; the Table details view opens. 6. Select the Column families tab. On this tab, you can view all column families created on the table.
7. Perform one of the following: click the ellipsis in the Action column of the column family for which you want to create or edit permissions, or click the name of the column family so that the Column family details view opens, and then click the ellipsis. 8. Click Edit; the column family wizard opens. 9. Click Manage column permissions; the wizard opens the Manage column permissions window, from which you can create columns and column-specific user access controls. 10. Enter the column name. 11. Define user access controls for your column. The wizard automatically creates a default column Access control entry; you can edit the default entry or delete it with the action button (trash can). Select the user Type, enter the user Name, and select the permissions you want the user to have: Read (read permissions on the column), Write (write permissions on the column), or Append (append permissions on the column). 12. (Optional) Click Add to create another user access control for the column. 13. (Optional) Click Add column permissions to define user access controls for another column. 14. Click Apply. Results: The permissions are applied to the column family with the defined user access controls on the columns.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/configuring_column_family_permissions.html",
+ "title": "Configuring Column Family Permissions"
+ },
+ {
+ "content": "\nDeleting a Column Family. This topic describes deleting a column family.
+ { + "content": "\nDeleting a Column Family This topic describes deleting a column family. Prerequisites You must have the permissions to delete column families from the table. About this task Follow the steps given below to delete a column family from a table. Procedure Log on to the Data Fabric UI. Select Fabric user on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the table on which you want to delete a column family. Click the table name seen under Resource Name. Select the Column families tab. Click the ellipsis ( ) in the Action column of the column family you want to delete. Click Delete. Results The column family is deleted from the table.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/deleting_a_column_family.html", + "title": "Deleting a Column Family" + },
+ { + "content": "\nViewing Table Information The topics in this section describe viewing table information. Viewing the List of Tables View the list of tables created on a fabric. Viewing Column Families View column families created on a table.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_table_information.html", + "title": "Viewing Table Information" + },
+ { + "content": "\nViewing the List of Tables View the list of tables created on a fabric. About this task Use the following steps to view tables created on a fabric. Procedure Log on to the Data Fabric UI. Under the default Fabric user, click the Graph View icon on the Resources card. The Graph View opens. To open the tabular list of tables, hover over the tables card in Graph View and click View in table. Alternatively, under the default Fabric user experience, click the Table View icon on the Resources card. The Table View opens. To view information about a specific table, click the name of the table in Table View.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_the_list_of_tables.html", + "title": "Viewing the List of Tables" + },
+ { + "content": "\nViewing Column Families View column families created on a table. About this task You can view column families from the Data Fabric UI. Use the following steps to view column families. Procedure Log on to the Data Fabric UI. Under the default Fabric user, click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the table. Click the name of the table for which you want to view column families. The table Overview screen opens. Click the Column families tab. Results You can now view the list of column families.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_column_families.html", + "title": "Viewing Column Families" + },
+ { + "content": "\nManaging Table Replication The topics in this section describe managing table replication. You can use table replication to improve data availability and load balancing. Availability: Replicate tables between different fabrics within your cluster. Load balancing: Use table replicas to reduce the load on the primary table. For example, you can run data analysis tasks on a replica of the primary table rather than the primary table itself. Adding a Table Replica This topic describes adding a table replica. Viewing Table Replicas View the list of created table replicas.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/managing_table_replication.html", + "title": "Managing Table Replication" + },
+ { + "content": "\nAdding a Table Replica This topic describes adding a table replica. Prerequisites A table must be available for you to create a replica. You must have the permissions to create table replicas. About this task Follow the steps given below to create a table replica. Procedure Log on to the Data Fabric UI. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the table for which you want to create a replica. Click the table name seen under Resource Name. Click Add replica on the Replication tab. The wizard automatically fills the name of the Source fabric and Path to source table. Select the Destination fabric from the dropdown menu. Enter the Replica path. (Optional): Click the toggle to enable Advanced options: Throttle: Enable throttling to limit the speed of connection. Use this option to minimize the impact on the primary table, especially when under heavy load. Synchronous: Enable synchronous replication. Encrypt on wire: Enable this option to encrypt the replicated data during transfer. Click Column families. The Add replica wizard opens the Column families window. Select the column families you want to replicate. You can either: Replicate all column families. Replicate specific column families: Click the toggle for the column families you want to replicate. For each column family, you can either replicate All columns in the column family or you can Assign columns to replicate. If you select Assign columns, enter the column name in the Column name field. Click Add to add another column to replicate. Click Add. Results The table replica is created in the specified location.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/adding_a_table_replica.html", + "title": "Adding a Table Replica" + },
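For reference, the replication setup above can also be driven from the command line. The following is a minimal sketch, assuming the maprcli `table replica autosetup` command (which registers a replica, copies the existing source data, and starts replication); the source and replica table paths are hypothetical.

```python
# Hypothetical sketch: set up table replication with one maprcli call.
import subprocess

src = "/fabric-a/ratings"          # primary table (made-up path)
dst = "/fabric-b/ratings-replica"  # replica on another fabric (made-up path)

subprocess.run(
    ["maprcli", "table", "replica", "autosetup", "-path", src, "-replica", dst],
    check=True,
)

# Analytics jobs can then read from `dst` instead of the primary table,
# which is the load-balancing pattern described in the topic above.
```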
+ { + "content": "\nViewing Table Replicas View the list of created table replicas. Prerequisites You must have permission to view table replicas. About this task You can view table replicas from the Data Fabric UI. Use the following steps to view table replicas. Procedure Log on to the Data Fabric UI. Under the default Fabric user experience, click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the table replica.
Click the name of the table for which you want to view replicas. The table Overview screen opens. Click the Replication tab. Results You can now view the list of table replicas.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_table_replicas.html", + "title": "Viewing Table Replicas" + }, + { + "content": "\nAdministering Access Controls for Tables This topic describes administering access controls for tables. Prerequisites To add or edit user access controls, you must have Admin permissions on the table. About this task Follow the steps given below to create user access controls for a table. Procedure Log on to the Data Fabric UI. Under the default Fabric user, click the Table View icon on the Resources card.
In the tabular list of fabrics, click the down arrow for the fabric that contains the table for which you want to manage access controls. Click the name of the table for which you want to manage access controls. The table Overview screen opens. Click the Settings tab. Click the Access Control button. The Access Control wizard opens. Select the user type for the access control. Enter the user name. Select the permissions you want the user to have: Admin (Access control): The user can create, edit, and delete access controls for the table. Create/rename column family Delete column family (Optional) Click Add to create another access control for the table. Click Save.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/administering_access_controls_for_tables.html", + "title": "Administering Access Controls for Tables" + },
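The three UI permissions above appear to correspond to table-level ACE flags on `maprcli table edit`. The sketch below is an assumption-labeled illustration, not a confirmed mapping: the flag names, the table path, and the user are all hypothetical placeholders to be checked against your release's maprcli reference.

```python
# Hypothetical sketch: grant one user the three table-level permissions
# described above (admin access, create/rename CF, delete CF) via maprcli.
import subprocess

subprocess.run(
    [
        "maprcli", "table", "edit",
        "-path", "/mydata/mytable",            # made-up table path
        "-adminaccessperm", "u:jordan",        # Admin (Access control)
        "-createrenamefamilyperm", "u:jordan", # Create/rename column family
        "-deletefamilyperm", "u:jordan",       # Delete column family
    ],
    check=True,
)
```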
+ { + "content": "\nAdministering Topics Administer topics for Apache Kafka Wire Protocol with HPE Ezmeral Data Fabric. Data Fabric supports the creation of topics for Apache Kafka Wire Protocol via the Data Fabric UI. A topic is given a unique name that is used to categorize and organize messages based on common properties. One or more producers publish messages to topics. Individual consumers subscribe to the topics of their choice to consume the messages that are published to such topics. For example, a producer could publish weather-related data such as daily rainfall to a topic called dailyrain, and a consumer could subscribe to that topic to consume the daily rainfall readings. Creating a Topic Create a topic for Apache Kafka Wire Protocol. Editing a Topic Edit a topic for Apache Kafka Wire Protocol. Deleting a Topic Delete a topic for Apache Kafka Wire Protocol. Viewing or Downloading Topic Connection Properties View and download connection properties related to a topic for Apache Kafka Wire Protocol.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/administering_topics.html", + "title": "Administering Topics" + },
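Because the fabric speaks the Apache Kafka wire protocol, the dailyrain example above can be exercised with a standard Kafka client. Here is a minimal kafka-python sketch; the bootstrap endpoint is a placeholder, and any security settings from your fabric's connection properties are omitted.

```python
# Minimal producer/consumer sketch for the "dailyrain" example above.
from kafka import KafkaProducer, KafkaConsumer

BOOTSTRAP = "fabric.example.com:9092"  # placeholder endpoint

# Producer publishes a daily rainfall reading to the "dailyrain" topic.
producer = KafkaProducer(bootstrap_servers=BOOTSTRAP)
producer.send("dailyrain", b"2024-01-09,12.7mm")
producer.flush()

# Consumer subscribes to the same topic and reads the published messages.
consumer = KafkaConsumer("dailyrain", bootstrap_servers=BOOTSTRAP,
                         auto_offset_reset="earliest")
for record in consumer:
    print(record.value)
    break  # read a single message for the sake of the example
```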
+ { + "content": "\nCreating a Topic Create a topic for Apache Kafka Wire Protocol. Prerequisites A topic with the same name must not already exist on the fabric. You must be a fabric user to create a topic on the fabric. About this task One or more topics for Apache Kafka Wire Protocol can be created on a fabric via the Data Fabric UI. Data is stored in topics in the form of messages for the retention period specified while creating the topic. The retention period can be specified in seconds, minutes, hours, or days. The default unit for the retention period is days. Data can be stored in uncompressed and compressed formats in topics. By default, data on topics is uncompressed. Data compression helps reduce network bandwidth usage and saves disk space, but results in higher CPU utilization. Data Fabric supports the following algorithms to compress data in a topic: LZ4, an extremely fast lossless compression algorithm; LZF, a Java-based compression algorithm optimized for speed with modest data compression; and ZLib, an open-source mechanism for lossless compression. If you wish to store data in a compressed format on the topic, select the compression algorithm based on your environment and requirements. Follow the steps given below to create a topic. Procedure Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click Topic seen next to Create on the Resources card. Enter the topic name. Select the fabric on which you wish to create the topic. Enter the number of partitions for the topic. Enter the time to live for the topic. This is the duration for which messages are retained within the topic. Choose the compression type. Click Create. Results The topic is created on the specified fabric. A message regarding the topic creation is displayed on the Data Fabric UI. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the following maprcli command. The command is provided for general reference. For more information, see maprcli Commands in This Guide. stream topic create", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/creating_kafka_topic.html", + "title": "Creating a Topic" + },
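The same create-topic fields (partitions, time to live, compression) can also be expressed over the wire protocol with kafka-python's admin client. A sketch, assuming a placeholder endpoint and that the fabric accepts standard Kafka topic configs such as retention.ms and compression.type:

```python
# Sketch: create the "dailyrain" topic programmatically, mirroring the UI
# fields above. Endpoint and replication factor are placeholders.
from kafka.admin import KafkaAdminClient, NewTopic

admin = KafkaAdminClient(bootstrap_servers="fabric.example.com:9092")

topic = NewTopic(
    name="dailyrain",
    num_partitions=3,
    replication_factor=1,
    topic_configs={
        "retention.ms": str(7 * 24 * 60 * 60 * 1000),  # 7-day time to live
        "compression.type": "lz4",                     # one of the listed codecs
    },
)
admin.create_topics(new_topics=[topic])
```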
+ { + "content": "\nEditing a Topic Edit a topic for Apache Kafka Wire Protocol. Prerequisites The topic to edit must exist. You must have permission to edit a topic. About this task You can edit a topic for Apache Kafka Wire Protocol via the Data Fabric UI. The following fields related to a topic are editable: number of partitions, time to live, and compression scheme. Follow the steps given below to edit a topic. Procedure Log on to the Data Fabric UI. Under the default Fabric user, click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the topic to edit. Click the topic name seen under Resource Name. Edit the topic as required. Click Save. Results The changes made to the topic are saved. A message regarding the successful modification is displayed on the Data Fabric UI. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the following maprcli command. The command is provided for general reference. For more information, see maprcli Commands in This Guide. stream topic edit", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/editing_kafka_topic.html", + "title": "Editing a Topic" + },
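A sketch of the same edits over the wire protocol with kafka-python, under the same assumptions as the previous example (placeholder endpoint, standard Kafka topic configs):

```python
# Sketch: grow the partition count and update retention/compression on an
# existing topic, matching the three editable fields listed above.
from kafka.admin import (KafkaAdminClient, NewPartitions,
                         ConfigResource, ConfigResourceType)

admin = KafkaAdminClient(bootstrap_servers="fabric.example.com:9092")

# Increase the topic to 6 partitions (partition counts can only grow).
admin.create_partitions({"dailyrain": NewPartitions(total_count=6)})

# Update time to live and compression on the existing topic.
admin.alter_configs([
    ConfigResource(ConfigResourceType.TOPIC, "dailyrain",
                   configs={"retention.ms": "1209600000",  # 14 days
                            "compression.type": "lz4"}),
])
```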
+ { + "content": "\nDeleting a Topic Delete a topic for Apache Kafka Wire Protocol. Prerequisites The topic to delete must exist. You must be a fabric user to delete a topic. About this task You can delete a topic associated with a fabric. Follow the steps given below to delete a topic. Procedure Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the topic to delete. Click the topic name seen under Resource Name. Locate the topic to delete. Click the ellipsis under Actions for the topic row in the tabular list of resources. Click the Delete menu option. Click Delete to confirm topic deletion. Results The topic is permanently deleted, along with the data stored on the topic. A message regarding the topic deletion is displayed on the Data Fabric UI. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the following maprcli command. The command is provided for general reference. For more information, see maprcli Commands in This Guide. stream topic delete", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/deleting_kafka_topic.html", + "title": "Deleting a Topic" + },
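The corresponding wire-protocol call is a one-liner with kafka-python's admin client (placeholder endpoint again). Note that, as the topic above says, this removes the stored messages along with the topic.

```python
# Sketch: delete the "dailyrain" topic and all of its messages.
from kafka.admin import KafkaAdminClient

admin = KafkaAdminClient(bootstrap_servers="fabric.example.com:9092")
admin.delete_topics(["dailyrain"])
```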
+ { + "content": "\nViewing or Downloading Topic Connection Properties View and download connection properties related to a topic for Apache Kafka Wire Protocol. Prerequisites You must be a fabric user or a fabric manager to view and download connection properties for a topic. About this task You can view and/or download topic connection properties from the Data Fabric UI. Use the following steps to view or download connection properties. Procedure Log on to the Data Fabric UI. Under the default Fabric user, click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the topic. Click View Connection Properties in the Endpoint column for the topic to view the connection properties. If you want to download the connection properties, click Download on the Connection Properties dialog box. Results The connection properties file is downloaded to the default downloads folder on the local machine.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_download_topic_connection_properties.html", + "title": "Viewing or Downloading Topic Connection Properties" + },
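What the downloaded file contains depends on your fabric. Assuming it is a plain key=value .properties file with a bootstrap.servers entry, a client could consume it along these lines; the file name and property keys are hypothetical, so inspect the actual download for the exact names.

```python
# Sketch: feed a downloaded connection-properties file into a Kafka client.
from kafka import KafkaConsumer

def load_properties(path: str) -> dict:
    """Parse a simple key=value .properties file, skipping comments."""
    props = {}
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if line and not line.startswith("#") and "=" in line:
                key, _, value = line.partition("=")
                props[key.strip()] = value.strip()
    return props

props = load_properties("dailyrain-connection.properties")  # hypothetical name
consumer = KafkaConsumer("dailyrain",
                         bootstrap_servers=props["bootstrap.servers"])
```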
+ { + "content": "\nAdministering Volumes Administer volumes on HPE Ezmeral Data Fabric. HPE Ezmeral Data Fabric provides volumes as a way to organize data and manage fabric performance. A volume is a logical storage unit that allows you to apply policies to a set of files, directories, and sub-volumes. You can use volumes to enforce disk usage limits, establish ownership and accountability, and measure the cost generated by different projects or departments. For example, you could create a volume for each user, department, or project. A volume consumes storage space only when data is written to the volume. Volume Types Volumes can be of the following types. Standard volume: An initial or original volume that contains data. Mirror volume: A replica of a standard volume that replicates the data present on a standard volume. Volume Data Backup Following are the ways in which data on volumes can be backed up for restoration purposes.
Creating mirrors or mirror volumes: Data in a volume is replicated when you create mirror volumes. A volume that is mirrored is called a standard volume. The mirrored version of a volume is called a mirror volume. You can create mirrors of mirror volumes to replicate the data present on mirror volumes. Such volumes are called cascading mirrors. Creating volume snapshots: The state of data in a volume is recorded at the point in time of the volume snapshot creation. A snapshot is a read-only image of a volume at a specific point in time. Snapshots are useful any time you need to be able to roll back to a known good data set at a specific point in time. For example, before performing a risky operation on a volume, you can create a snapshot to enable rollback capability for the entire volume. A snapshot takes no time to create, and initially uses no disk space, because it stores only the incremental changes needed to roll the volume back to its state at the time the snapshot was created. The storage used by a volume's snapshots does not count against the volume's quota. Creating a Standard Volume Procedure to create a standard volume. Creating a Mirror Volume Procedure to create a mirror volume. Converting Standard Volume to Mirror Volume Editing a Volume Edit accountable entity, volume access for accountable entity, and volume hard quota. Renaming a Volume Rename a volume. Viewing Volume Endpoint Info View volume endpoint information. Viewing Object Endpoint Info to Remotely Access Files as Objects View endpoint information for files in a volume to be able to access the files as objects when accessed by an S3 client. Downloading Volume Endpoint Information Download a JSON file containing endpoint information for the selected volume. Deleting a Volume Delete a single volume. Administering Volume Snapshots Snapshot overview and administering snapshots. Data Tiering Conceptual information about data tiering. Mirroring Synopsis of mirrors and the mirroring process.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/administering_volumes.html", + "title": "Administering Volumes" + },
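The snapshot-before-a-risky-operation pattern described above maps to the maprcli `volume snapshot create` command. A minimal sketch with made-up volume and snapshot names:

```python
# Sketch: take a named snapshot of a volume before a risky operation, so the
# whole volume can be rolled back to this point in time if needed.
import subprocess

subprocess.run(
    ["maprcli", "volume", "snapshot", "create",
     "-snapshotname", "before-schema-migration",  # made-up snapshot name
     "-volume", "project-vol"],                   # made-up volume name
    check=True,
)
```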
+ { + "content": "\nCreating a Standard Volume Procedure to create a standard volume. Prerequisites The fabric for which you wish to create the volume must have been created on the cloud, on-premises, or in an air-gapped environment. You must have the permission to create a volume. If you wish to enable data-at-rest encryption for the volume, the fabric must have data-at-rest encryption enabled. If you wish to associate a security policy with the volume, the security policy must have been created by the fabric manager. About this task This task describes how to add a standard volume via the Data Fabric UI. A standard volume is a volume where data is originally written. A standard volume is read-write by default. It can be marked as read-only, if required. If you have not already created a storage policy or a remote target for the cold data that would be stored on the volume, you can create them as part of this task to enable cold tiering of the volume. Follow the steps given below to create a standard volume. Procedure Log on to the Data Fabric UI. Under the default Fabric user, click Create Volume on the Resources card. Enter the volume name. Select the fabric on which you wish to create the volume. Select the Standard option for Type. Specify the Mount Path for the volume. Enter the Volume Limit. The value specified for the volume limit must be less than or equal to the fabric capacity. Select a security policy for the volume. This is an optional step. Turn on the Data at rest encryption toggle to enable encryption for data at rest. This is an optional step.
Turn on the Data tiering toggle to enable data tiering, so that data can be offloaded to a cold tier. This is an optional step. If you enable data tiering, select the tiering type; selecting Remote archiving (cold) lets you offload data to a cold tier. If you have selected Remote archiving (cold), select the Storage policy and the Remote Target. You can create a new storage policy and/or a remote target by clicking Create new under the respective dropdowns. Click Create. Results: The volume is created on the selected fabric and its details are displayed in the list of resources under the respective fabric on the Resources card. If you have turned on data tiering, tiering is enabled on the volume and you can configure tiering options on the Settings tab for the volume. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the maprcli command volume create. The command is provided for general reference. For more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/add_standard_volume.html", + "title": "Creating a Standard Volume" + },
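For reference, the maprcli command named above can be scripted directly. A minimal sketch, assuming a hypothetical volume name, mount path, and quota (run from a node where maprcli is configured):

    # Create a standard (read-write) volume with a mount path and a hard quota
    maprcli volume create -name sales.data -path /sales/data -quota 50G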
+ { + "content": "\nCreating a Mirror Volume Procedure to create a mirror volume. Prerequisites: The fabric on which you wish to create the volume must have been created, whether on the cloud, on-premises, or in an air-gapped environment. The standard volume that you wish to mirror must have been created before you can create a mirror volume. You must have the permission, and a valid activation key, to create a mirror volume on the fabric. About this task: A mirror is a replica of a standard volume and consumes space on disk. You can create a mirror of an existing volume by using the Data Fabric UI. Mirror volumes can be used for data recovery, to recover data on a corrupted or lost volume. A mirror volume can be created on-premises or in the cloud, based on your fabric setup, and on a fabric other than the fabric to which the source volume belongs. Mirror volumes can be local or remote: remote mirrors are used for disaster recovery, while local mirrors are used for load balancing. Mirror volumes that are created from mirror volumes are called cascading mirrors. Follow the steps given below to create a mirror volume. Procedure: Log on to the Data Fabric UI. Under the default Fabric user experience, click Create Volume on the Resources card. Alternatively, you can click the ellipsis under Actions for the volume to mirror on the Resources card, and click the Create mirror option. Enter the volume name. Select the fabric on which you wish to create the volume from Source Fabric. Select the Mirror option for Type. Select the Source Cluster, that is, the cluster/fabric that contains the volume to replicate. Select the Source Volume to replicate; this is non-editable if you have already chosen the volume to mirror. Specify the Mount Path for the volume. Enter the Volume Limit; the value specified for the volume limit must be less than or equal to the fabric capacity. Select a security policy for the volume. This is an optional step. Turn on the Data at rest encryption toggle to enable encryption for data at rest. This is an optional step. Turn on the Data tiering toggle to enable data tiering, so that data can be offloaded to a cold tier. This is an optional step. If you enable data tiering, select the tiering type; selecting Remote archiving (cold) lets you offload data to a cold tier. If you have selected Remote archiving (cold), select the Storage policy and the Remote Target. You can create a new storage policy and/or a remote target by clicking Create new under the respective dropdowns. Click Create. Results: The mirror volume is created.
NOTE: Alternatively, you can click the volume to mirror in the Resources list, and click Actions > Create mirror. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the maprcli command volume create. The command is provided for general reference. For more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/add_mirror_volume.html", + "title": "Creating a Mirror Volume" + },
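For reference, a hedged maprcli sketch of creating and then synchronizing a mirror; sales.mirror, /sales/mirror, and sales.data@fabric1 are hypothetical placeholders:

    # Create a mirror of volume sales.data hosted on cluster fabric1
    maprcli volume create -name sales.mirror -path /sales/mirror -type mirror -source sales.data@fabric1
    # Start a mirror operation to copy source data into the mirror
    maprcli volume mirror start -name sales.mirror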
+ { + "content": "\nConverting Standard Volume to Mirror Volume Prerequisites: You must be a fabric user to convert a standard volume to a mirror volume. The standard volume must have an associated mirror volume that can serve as the source volume. About this task: You can convert a standard volume to a mirror volume and set it up to mirror one of its associated mirror volumes. NOTE: The standard volume, when converted, can only be a mirror of one of its associated mirror volumes. Follow the steps given below to convert a standard volume to a mirror volume. Procedure: Log on to the Data Fabric UI. On the Resources card, click the name of the standard volume that you wish to convert to a mirror volume. Click the Actions menu seen on the top right of the tabs displaying the volume details. Click Make Mirror. Select the Source fabric that has the mirror volume that the converted volume will mirror. Select the Source volume that the converted volume will mirror. Click Save. Results: The standard volume is converted into a read-only mirror of the selected source volume on the selected source fabric, keeping its existing name. You can associate a mirroring schedule with this volume to ensure that data on the volume stays in sync with the source volume.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/converting_standard_volume_to_mirror_volume.html", + "title": "Converting Standard Volume to Mirror Volume" + },
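This page lists no maprcli equivalent; in classic maprcli, conversion is typically done with volume modify. A hedged sketch with hypothetical names (verify the -type parameter against your release's maprcli reference):

    # Convert a standard volume into a mirror of one of its associated mirrors
    maprcli volume modify -name sales.data -type mirror -source sales.mirror@fabric1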
+ { + "content": "\nEditing a Volume Edit the accountable entity, volume access for the accountable entity, and the volume hard quota. Prerequisites: You must have the permission to edit the volume. About this task: You can edit details such as the hard quota, read/write access, and the accountable entity when you edit a volume via the Data Fabric UI. Follow the steps given below to edit volume properties. Procedure: Log on to the Data Fabric UI. Under the default Fabric user experience, click the Table view icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume. Click the volume name seen under Resource Name. Navigate to the Settings tab for the volume. Click the pencil/Edit icon next to Properties to edit the accountable entity and/or volume access as read or read/write, and click Save. This is an optional step. Click the pencil/Edit icon next to Quota to edit the hard quota for the volume. This is an optional step. Click Save. Results: The volume is edited successfully. Child topics: Setting a Volume Quota: set a space quota for a volume. Configuring Data Access Control for Volume. Configuring Volume Administration Settings: configure volume access control for various user types.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/edit_volume.html", + "title": "Editing a Volume" + }, + { + "content": "\nSetting a Volume Quota Set a space quota for a volume.
Prerequisites: You must be a fabric user to configure or set a volume quota. About this task: You can set a hard quota for a volume via the Data Fabric UI. NOTE: It is recommended to also set an advisory quota for a volume; see Setting advisory quota via CLI. An alarm is raised when the advisory quota is reached or exceeded. Use the following steps to set the volume quota. Procedure: Log on to the Data Fabric UI. Select the Fabric user option on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume to set the quota for. Click the volume name seen under Resource Name. Navigate to the Settings tab. Click the Edit icon seen next to Quota. Enter the value and select the unit of measurement for the quota.
Click Save. Results: The hard quota for the volume is set. Data on the volume cannot occupy space beyond the quota; if data on the volume reaches the hard quota, an alarm is raised.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/setting_volume_quota.html", + "title": "Setting a Volume Quota" + },
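For reference, quotas can also be set with maprcli volume modify. A minimal sketch with hypothetical values; the advisory quota raises an alarm before the hard quota is reached:

    # Set a 40G advisory quota and a 50G hard quota on the volume
    maprcli volume modify -name sales.data -advisoryquota 40G -quota 50G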
+ { + "content": "\nConfiguring Data Access Control for Volume Prerequisites: The following prerequisites must be met before attempting to configure data access control for a volume. The user or group for which you wish to assign permissions must exist for the fabric. You must be a fabric user or fabric manager to configure data access for the volume. About this task: Configure or control access to data on a volume. There are three user types that can be assigned access to data on a volume: Public refers to all users. User is an individual user of the Data Fabric UI. Group is a collection of users categorized by a commonality such as department or location. You can assign read or write permissions to public or to specific users and/or groups. By default, read and write permissions are assigned to all users. When a read or write permission is assigned to public, all users and groups are able to read data from and write data to the volume in question. NOTE: A write permission assigned to a user type implies that the respective user type has both read and write permissions on the volume. If you wish to assign read or write permission to specific users and/or groups, you must first remove any permission assigned to the public user type; you cannot assign read or write permissions to specific users and/or groups while the respective permissions are assigned to public. To configure data access control for a volume, follow these steps: Procedure: Log on to the Data Fabric UI. Select the Fabric user option on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume for which you wish to set data access control. Click the volume name seen under Resource Name. Click the Settings tab. Click the Edit icon seen next to Data Access Control. Click Add to add permissions for a user or a group. Select user or group from the Type dropdown. Enter the name of the user or group to which you wish to assign the permission or permissions. Select the read and/or write checkbox, depending on the permissions you wish to assign to the user or the group. Repeat the steps for adding users/groups and assigning permissions, as necessary. Click Save. Results: Permissions are assigned to the respective users and/or groups.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/configure_data_access_control_for_volume.html", + "title": "Configuring Data Access Control for Volume" + },
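For reference, volume data access maps to volume ACEs in maprcli. A hedged sketch with a hypothetical user and group; ACE values such as 'u:alice|g:analysts' are boolean access-control expressions:

    # Allow alice and the analysts group to read; allow only alice to write
    maprcli volume modify -name sales.data -readace 'u:alice|g:analysts' -writeace 'u:alice'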
+ { + "content": "\nConfiguring Volume Administration Settings Configure volume access control for various user types. Prerequisites: The following prerequisites must be met before you can configure volume administration settings for users and groups. You must be a fabric manager to perform the operation. The users and groups must have been created before you can assign the various volume-related permissions. About this task: You can assign various volume-related permissions to specific users and/or groups. The following permissions with respect to a volume can be assigned to one or more users and/or groups.
Edit permission. Admin permission, to manage access control to the volume and the data on the volume. Restore and mirror permission. Delete permission. Full control, which means all the aforementioned permissions. Follow the steps given below to configure volume administration settings. Procedure: Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click the Table view icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume. Click the volume name seen under Resource Name. Click the Settings tab. Click the Edit icon seen next to Volume Admin Control. Click Add to add permissions for a user or a group. Select user or group from the Type dropdown. Enter the name of the user or group to which you wish to assign the permission or permissions. Select the checkbox or checkboxes for the permission(s) that you wish to assign to the user or the group. Repeat the steps for adding users/groups and assigning permissions, as necessary. Click Save. Results: Permissions are assigned to the respective users and/or groups.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/configuring_volume_administration_settings.html", + "title": "Configuring Volume Administration Settings" + },
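For reference, volume administration permissions correspond to volume ACLs in maprcli. A hedged sketch with a hypothetical user; the fc permission code grants full control:

    # Grant alice full control over volume administration
    maprcli acl edit -type volume -name sales.data -user alice:fc
    # Inspect the resulting ACL
    maprcli acl show -type volume -name sales.data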
+ { + "content": "\nRenaming a Volume Rename a volume. Prerequisites: You must have the permission to rename a volume. About this task: You can rename a volume associated with a fabric via the Data Fabric UI. You can rename only one volume at a time. Follow the steps given below to rename a volume. Procedure: Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click the Table view icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume to rename. Click the volume name seen under Resource Name. Click the ellipsis under Actions for the volume. Click the Rename option. Enter the new name for the volume and click Save. Results: The volume is renamed successfully. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the maprcli command volume rename. The command is provided for general reference. For more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/renaming_a_volume.html", + "title": "Renaming a Volume" + },
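For reference, the maprcli command named above can be scripted directly; sales.data and sales.archive are hypothetical names:

    # Rename the volume
    maprcli volume rename -name sales.data -newname sales.archive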
+ { + "content": "\nViewing Volume Endpoint Info View volume endpoint information. Prerequisites: You must be a fabric user to perform this operation. About this task: You can view volume endpoint information from the Data Fabric UI. The endpoint information can be used in scripts to make API calls that access the volume. Follow the steps given below to view volume endpoint information. Procedure: Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume. Click the ellipsis under Actions for the required volume. Click the View endpoint option. Results: The volume endpoint information is displayed.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_endpoint_for_volume.html", + "title": "Viewing Volume Endpoint Info" + },
+ { + "content": "\nViewing Object Endpoint Info to Remotely Access Files as Objects View endpoint information for files in a volume so that the files can be accessed as objects by an S3 client. Prerequisites: You must be a fabric user to perform this operation. About this task: A volume contains one or more files. The files in a volume can be accessed as objects by an S3 client, via the endpoints provided by Data Fabric. You can view the object endpoints from the Data Fabric UI. The object endpoints for files can be used in scripts to make API calls from S3 clients that access files in the volume. Follow the steps given below to view object endpoint information for files in a volume. Procedure: Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume. Click the ellipsis under Actions for the required volume. Click the View endpoint option. Results: You are able to view the volume endpoint and the object endpoint information corresponding to the files on the volume.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_object_endpoints_to_access_files_as_objects.html", + "title": "Viewing Object Endpoint Info to Remotely Access Files as Objects" + }, + { + "content": "\nDownloading Volume Endpoint Information Download a JSON file containing endpoint information for the selected volume.
Prerequisites: You must be a fabric user to perform this operation. About this task: You can download the endpoint information for a selected volume. The downloaded volume endpoint information is available as a JSON file. NOTE: You can view object endpoints for files on the Data Fabric UI; only volume endpoint information is downloadable. Follow the steps given below to download volume endpoint information. Procedure: Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume. Click the ellipsis under Actions for the required volume. Click the View endpoint option. Click Download on the Endpoints dialog box. Results: A JSON file containing the volume endpoint information for the selected volume is downloaded to your local downloads folder.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/downloading_volume_endpoints_and_object_endpoints_for_files_on_a_volume.html", + "title": "Downloading Volume Endpoint Information" + }, + { + "content": "\nDeleting a Volume Delete a single volume.
Prerequisites: You must be a fabric user to perform this operation. About this task: You can delete a volume associated with a fabric via the Data Fabric UI. Once the volume is deleted, the data on the volume is lost unless you have a backup of that data. Follow the steps below to delete a volume. Procedure: Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume to delete. Click the volume name seen under Resource Name. Click the ellipsis under Actions for the volume. Click the Delete option. Click Delete to confirm volume deletion. Results: The volume is permanently deleted, along with the data stored on the volume. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the maprcli command volume delete. The command is provided for general reference. For more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/deleting_volume.html", + "title": "Deleting a Volume" + },
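For reference, a hedged CLI sketch; note that the classic maprcli verb for deletion is volume remove (the UI page refers to it as volume delete), and the name below is hypothetical:

    # Permanently remove the volume and its data
    maprcli volume remove -name sales.archive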
+ { + "content": "\nAdministering Volume Snapshots Snapshot overview and administering snapshots. Snapshot Basics: A snapshot is a static, read-only view of a volume that represents the state of the volume at the point in time of the snapshot's creation. Because a snapshot is not a replica of a volume, it does not occupy much space on the volume. A snapshot takes no time to create, and initially uses no disk space, because it stores only the incremental changes needed to roll the volume back to its state at the time the snapshot was created. The storage used by a volume's snapshots does not count against the volume's quota. A snapshot can be used to restore volume data to its state at the time of the snapshot's creation. You can use a snapshot for the following purposes: Snapshots enable you to roll back to a known good data set and recover data in case of data corruption or accidental deletion, without the help of storage administrators. For example, before performing a risky operation on a volume, you can create a snapshot to enable rollback capability for the entire volume. Snapshots also let you create static data sets for querying and auditing. Snapshots are stored in the .snapshot directory on the volume mount path. You can access snapshots via NFS or the Hadoop shell. You can create a snapshot manually, or automate the process with a schedule; if you wish to schedule the creation of snapshots, you must assign a predefined schedule. Child topics: Schedules for Volume Snapshots: describes schedules for snapshots. Creating a Volume Snapshot: create a volume snapshot manually via the Data Fabric UI.
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/administering_volume_snapshots.html",
+    "title": "Administering Volume Snapshots"
+  },
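Because a snapshot surfaces as a plain directory, it can be browsed from any client. A quick sketch, assuming the fabric is NFS-mounted under /mapr/my.cluster.com and the volume is mounted at /myvolume (both paths are placeholders):

```
# List the snapshots of a volume over NFS (hypothetical mount paths)
ls /mapr/my.cluster.com/myvolume/.snapshot
```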
+  {
+    "content": "\nSchedules for Volume Snapshots. Describes schedules for snapshots. About Schedules: A schedule to capture snapshots of volume data can be created and assigned to volumes. Data Fabric provides predefined schedules that can be applied to volumes. Predefined schedules are classified into three categories, depending on the type of data in the volume you wish to back up with a snapshot: if the volume data needs to be backed up very frequently, select the critical data schedule; if it suffices to back up volume data less frequently, select the normal data schedule. Table 1. Predefined Schedules for Volumes:\n- Critical Data: Hourly, retained for 24 hours; daily at 12:00 AM, retained for 7 days; weekly every Sunday at 12:00 AM, retained for 12 weeks. Use for volumes with data that might be changing constantly and/or needs to be frequently backed up.\n- Important Data: Daily at 6:00 AM, 12:00 PM, and 6:00 PM, each retained for 24 hours; daily at 12:00 AM, retained for 7 days; weekly every Sunday at 12:00 AM, retained for 4 weeks; monthly on the first day of the month at 12:00 AM, retained for 2 months. Use for volumes containing data that needs to be backed up frequently during the day and week.\n- Normal Data: Daily at 12:00 AM, retained for 7 days; weekly every Sunday at 12:00 AM, retained for 4 weeks; monthly on the first day of the month at 12:00 AM, retained for 2 months. Use for volumes containing data that changes infrequently or does not need to be backed up frequently.\nFor any schedule, if you wish to preserve a snapshot beyond its default retention period, you can preserve the snapshot. When you specify a snapshot schedule on a mirror volume, it specifies how often to take a snapshot of the mirror volume. This snapshot schedule is distinct from the snapshot schedule for the standard volume. A snapshot schedule for a promotable mirror volume has two purposes: it specifies how often to take a snapshot of the mirror volume to preserve the state of the mirror before a subsequent mirror operation, so that if corrupt data is copied from the source volume's snapshot into the mirror volume, the mirror contents can be rolled back to the snapshot; and, if the promotable mirror volume is promoted to a read-write volume, the snapshot schedule specified for the mirror is used for the promoted read-write volume.
Once a mirror volume is promoted to a read-write volume, the mirror schedule is disabled.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/schedules_for_volume_snapshots.html",
+    "title": "Schedules for Volume Snapshots"
+  },
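To see the predefined schedules (and the IDs used to reference them), the maprcli schedule command can be used; a minimal sketch:

```
# Show all schedules known to the cluster, including the predefined
# Critical data, Important data, and Normal data schedules
maprcli schedule list
```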
+  {
+    "content": "\nCreating a Volume Snapshot. Create a volume snapshot manually via the Data Fabric UI. Prerequisites: You must be a fabric user to create a volume snapshot. About this task: A snapshot is a read-only image of a volume that provides point-in-time recovery of the volume if data on the volume gets corrupted or lost. Snapshots store changes to the data present in the volume. A snapshot preserves access to historical data and facilitates data retrieval when data is lost due to user or application errors. You can create a snapshot manually, or automate the process with a schedule. Snapshots are stored in the .snapshot directory, where you can always view them. The maximum number of snapshots that you can create for each volume is 4092. Exceeding this limit raises the snapshot failure alarm with an appropriate entry in the CLDB logs. Procedure: Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume to snapshot. Click the volume name seen under Resource Name. Navigate to the Snapshots tab. Click Create on the top right side of the tab. Enter a name for the volume snapshot or retain the default name. Click Save. Results: The volume snapshot is created and added to the list of snapshots on the Snapshots tab. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference. For more information, see maprcli Commands in This Guide. volume snapshot create",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/create_volume_snapshot.html",
+    "title": "Creating a Volume Snapshot"
+  },
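A command-line sketch of the same operation, assuming a volume named myvolume and a snapshot name of your choosing (both placeholders):

```
# Manually create a snapshot of a volume (hypothetical names)
maprcli volume snapshot create -volume myvolume -snapshotname mysnap-2023-11-01
```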
+  {
+    "content": "\nScheduling Volume Snapshots. Assign a schedule to a volume for creation of volume snapshots. Prerequisites: You must be a fabric user to assign a schedule to a volume for taking volume snapshots. The volume to assign the predefined schedule to must be a standard volume. About this task: You can assign a predefined schedule to a standard volume to take volume snapshots. See Schedules for Volume Snapshots for details on predefined schedules. Procedure: Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume to snapshot. Click the volume name seen under Resource Name. Navigate to the Settings tab. Under Schedules, click the Edit icon next to Snapshots. Select a suitable schedule option for the volume. Click Select.
Results: The selected schedule is applied to the volume and snapshots are taken per the schedule. The snapshots are stored in the .snapshot directory of the volume mount path.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/scheduling_volume_snapshots.html",
+    "title": "Scheduling Volume Snapshots"
+  },
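A sketch of the equivalent maprcli call, assuming myvolume is a placeholder and the schedule ID was obtained from maprcli schedule list:

```
# Attach schedule ID 2 to a volume so snapshots are taken automatically
# (volume name and schedule ID are placeholders)
maprcli volume modify -name myvolume -schedule 2
```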
+  {
+    "content": "\nPreserving a Volume Snapshot. Preserve a volume snapshot. Prerequisites: You must be a fabric user to preserve a volume snapshot. About this task: You can preserve a volume snapshot if you wish to keep it beyond the retention period defined at its creation. Procedure: Log on to the Data Fabric UI. Select the Fabric user option from the dropdown on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume whose snapshot is to be preserved. Click the volume name seen under Resource Name. Navigate to the Snapshots tab. Select the checkbox for the snapshot to preserve. Click the down arrow next to Actions on the top right side of the tab, and click the Preserve menu option. Click Preserve on the message box that appears. Results: The volume snapshot is preserved, and a message confirming the preservation is displayed on the Data Fabric UI. The snapshot can be accessed from the .snapshot directory on the volume mount path. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference. For more information, see maprcli Commands in This Guide. volume snapshot preserve",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/preserving_a_volume.html",
+    "title": "Preserving a Volume Snapshot"
+  },
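A command-line sketch for the same operation; the flag names follow the maprcli volume snapshot family but are an assumption to verify against your release (volume and snapshot names are placeholders):

```
# Preserve a snapshot so it outlives its schedule's retention period
maprcli volume snapshot preserve -volume myvolume -snapshotname mysnap-2023-11-01
```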
+  {
+    "content": "\nRestoring a Volume from Volume Snapshot. Describes how to restore a volume from a volume snapshot. Prerequisites: You must be a fabric user to restore a volume from a volume snapshot. About this task: You can restore volume data to a specific point in time with volume snapshots. Follow the steps given below to restore a volume from a volume snapshot. Procedure: Log on to the Data Fabric UI. Select the Fabric user option on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume whose snapshot is to be restored. Click the volume name seen under Resource Name. Navigate to the Snapshots tab. Select the checkbox for the snapshot from which you wish to restore the volume. Click the ellipsis next to the snapshot to restore from. Click the Restore menu option. Click Restore on the message box that appears. Results: The volume is restored from the snapshot, and a message confirming the restoration is displayed on the Data Fabric UI. The volume snapshot is preserved indefinitely in the .snapshot directory on the volume mount path. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference. For more information, see maprcli Commands in This Guide.
volume snapshot restore",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/restoring_snapshot.html",
+    "title": "Restoring a Volume from Volume Snapshot"
+  },
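A hedged sketch of the restore from the command line (names are placeholders; confirm the flags against the maprcli reference for your release):

```
# Roll a volume back to the state captured in a snapshot
maprcli volume snapshot restore -volume myvolume -snapshotname mysnap-2023-11-01
```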
+  {
+    "content": "\nDeleting a Volume Snapshot. Delete a volume snapshot. Prerequisites: You must be a fabric user to delete a volume snapshot. About this task: You can delete a volume snapshot manually if you no longer wish to retain it. NOTE: A volume snapshot is deleted automatically when its retention period ends. Procedure: Log on to the Data Fabric UI. Select the Fabric user option on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume whose snapshot is to be deleted. Click the volume name seen under Resource Name. Navigate to the Snapshots tab. Select the checkbox for the snapshot to delete. Click the down arrow next to Actions on the top right side of the tab. Click the Delete menu option. Click Delete on the message box that appears. Results: The volume snapshot is deleted and removed from the .snapshot directory; you can no longer access or use it. The data on the volume referred to by the snapshot remains intact; only the static view pointing to the volume data is deleted. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference. For more information, see maprcli Commands in This Guide. volume snapshot remove",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/delete_volume_snapshot.html",
+    "title": "Deleting a Volume Snapshot"
+  },
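The command-line counterpart, as a minimal sketch with placeholder names:

```
# Remove a snapshot; the volume's live data is unaffected
maprcli volume snapshot remove -volume myvolume -snapshotname mysnap-2023-11-01
```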
+  {
+    "content": "\nData Tiering. Conceptual information about data tiering. Data that is active and frequently accessed is categorized as hot data; data that is rarely accessed is categorized as warm or cold data. Hot, warm, and cold data are identified based on the rules and policies set by the administrator. The storage used to store hot data is referred to as the hot tier, the storage used to store warm data is referred to as an EC-tier, and the mechanism to store cold data is referred to as the cold tier. Data starts off as hot data when it is first written to local storage on a fabric, and can be classified as warm or cold based on the storage policies configured for the data present on Data Fabric. Data stored on a fabric requires three times the disk space of the regular volume on premium hardware due to replication (the default replication factor is 3). After offloading to the cloud, the space used by data (including data in the namespace container) in the volume on the data fabric cluster is freed, and only the metadata of the volume in the namespace container remains 3-way replicated on the data fabric cluster. Data can be set up to be automatically offloaded to a volume on a low-cost storage alternative, called a warm tier, on the data fabric cluster. Alternatively, data can be offloaded to low-cost storage on a third-party cloud object store, called a cold tier, such as S3. Data Fabric provides rule-based automated tiering functionality that allows you to seamlessly integrate with: low-cost storage as an additional storage tier in the data fabric cluster, for storing file data that is less frequently accessed (warm data) in an erasure-coded volume;
and third-party cloud object storage as an additional storage tier in the data fabric cluster, to store file data that is rarely accessed or archived (cold data). In this way, valuable on-premises storage resources can be used for more active or hot file data and applications, while warm and/or cold file data can be retained at minimum cost for compliance, historical, or other business reasons. The data fabric provides consistent and simplified access to and management of the data. Data, once offloaded, is purged on the data fabric cluster to release the disk space. When you delete an entire file, part of a file, or a snapshot, corresponding objects are removed from the tier. When a client tries to read offloaded data, the data fabric processes the read request of warm-tiered and cold-tiered standard and mirror volume data differently. Similarly, when a client writes to a tiered volume, the data fabric processes appends and overwrites differently. To manage data offloading, you must have created storage policies; see Administering Storage Policies to learn more about managing storage policies. To offload data, you must create remote targets; see Creating a Remote Target to add a new remote target. You can also schedule data offloading; see Administering Schedules for further information on creating schedules.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/data_tiering.html",
+    "title": "Data Tiering"
+  },
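Tiering must be enabled when the volume is created; a hedged sketch of what that might look like with maprcli (the volume name, path, and the -tieringenable flag are assumptions to verify against your release):

```
# Create a volume with data tiering enabled (hypothetical names and flags)
maprcli volume create -name coldvol -path /coldvol -tieringenable true
```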
+  {
+    "content": "\nSchedules for Volume Data Tiering. Describes schedules for data tiering of volume data. About Schedules: A schedule to capture snapshots of volume data can be created and assigned to volumes. Data Fabric provides predefined schedules that can be applied to volumes. Predefined schedules are classified into three categories, depending on the type of data in the volume you wish to back up with a snapshot: if the volume data needs to be backed up very frequently, select the critical data schedule; if it suffices to back up volume data less frequently, select the normal data schedule. Table 1. Predefined Schedules for Volumes:\n- Critical Data: Hourly, retained for 24 hours; daily at 12:00 AM, retained for 7 days; weekly every Sunday at 12:00 AM, retained for 12 weeks. Use for volumes with data that might be changing constantly and/or needs to be frequently backed up.\n- Important Data: Daily at 6:00 AM, 12:00 PM, and 6:00 PM, each retained for 24 hours; daily at 12:00 AM, retained for 7 days; weekly every Sunday at 12:00 AM, retained for 4 weeks; monthly on the first day of the month at 12:00 AM, retained for 2 months. Use for volumes containing data that needs to be backed up frequently during the day and week.
- Normal Data: Daily at 12:00 AM, retained for 7 days; weekly every Sunday at 12:00 AM, retained for 4 weeks; monthly on the first day of the month at 12:00 AM, retained for 2 months. Use for volumes containing data that changes infrequently or does not need to be backed up frequently.\nFor any schedule, if you wish to preserve a snapshot beyond its default retention period, you can preserve the snapshot. When you specify a snapshot schedule on a mirror volume, it specifies how often to take a snapshot of the mirror volume. This snapshot schedule is distinct from the snapshot schedule for the standard volume. A snapshot schedule for a promotable mirror volume has two purposes: it specifies how often to take a snapshot of the mirror volume to preserve the state of the mirror before a subsequent mirror operation, so that if corrupt data is copied from the source volume's snapshot into the mirror volume, the mirror contents can be rolled back to the snapshot; and, if the promotable mirror volume is promoted to a read-write volume, the snapshot schedule specified for the mirror is used for the promoted read-write volume. Once a mirror volume is promoted to a read-write volume, the mirror schedule is disabled.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/schedules_for_volume_data_tiering.html",
+    "title": "Schedules for Volume Data Tiering"
+  },
+  {
+    "content": "\nManually Offloading Data to a Cold Tier. Prerequisites: You must be a fabric user to perform this operation. Data tiering must have been enabled on the volume during volume creation to be able to offload or recall data. To offload data, you must create remote targets; see Creating a Remote Target to add a new remote target. To manage data offloading, you must have created storage policies; see Administering Storage Policies to learn more about managing storage policies. About this task: Data, once offloaded, is purged on the data fabric cluster to release the disk space. When you delete an entire file, part of a file, or a snapshot, corresponding objects are removed from the tier. Data is offloaded to the tier in the same state, compressed or uncompressed, as it was stored in the front-end volume. If data encryption is enabled on the front-end volume (using the dare parameter), data is encrypted during and after offload. At the volume level, data can be offloaded manually by triggering the offload operation. Follow the steps given below to offload data manually from a volume to a cold tier. Procedure: Log on to the Data Fabric UI. Select Fabric user on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume. Click the ellipsis under Actions for the required volume. Select Offload data. Results: The data offload begins to the designated cold tier.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/offloading_data_from_a_volume.html",
+    "title": "Manually Offloading Data to a Cold Tier"
+  },
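The equivalent manual trigger from the command line, a minimal sketch with a placeholder volume name:

```
# Manually offload a tiering-enabled volume's data to its configured tier
maprcli volume offload -name myvolume
```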
+  {
+    "content": "\nRecalling Data to the Data Fabric File System. Prerequisites: You must be a fabric manager or a fabric user to perform this operation. Data tiering must have been enabled on the volume during volume creation to be able to offload or recall data. Offloaded data must be present on the cold tier. About this task: When you read data that has been offloaded to a remote target (or cold tier), the data is automatically recalled to the file system to allow the read to succeed. The recalled data is automatically: purged, based on the expiration time period set at the volume level for recalled data, if there are no changes (for example, a read operation); or offloaded, based on the rule and the expiration time period set at the volume level for recalled data, if there are changes (for example, an overwrite operation). For a cold-tiering volume, you must explicitly recall the volume before running any analytics jobs. Follow the steps given below to recall data manually to a volume. Procedure: Log on to the Data Fabric UI. Select Fabric user on the Home page. Click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume. Click the ellipsis under Actions for the required volume. Select Recall data. Results: The data recall operation begins and data is recalled to the fabric file system.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/recalling_data_to_a_volume.html",
+    "title": "Recalling Data to the Data Fabric File System"
+  },
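And the matching recall, again a sketch with a placeholder volume name:

```
# Explicitly recall offloaded data back to the fabric file system
maprcli volume recall -name myvolume
```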
Administering Storage Policies Manage storage policies related to data tiering. You can configure a storage policy (or rules) for data at the volume level. A storage policy simplifies the lifecycle management of data in the volume, including automated migration of files to low-cost storage alternatives. A storage policy contains rules for files that have a well-defined lifecycle or for files you want to switch to different storage tiers during their lifecycle. You can specify the rules, at the volume level, to selectively identify files to offload (such as by file size, file owner, and file modification time), the schedule for offloading the data (for example, two months after file modification), and the settings for storing (such as the location and credentials for the tier) and recalling the offloaded data. You can configure one rule per volume. You can also associate a schedule to automatically offload data at scheduled intervals based on the associated rules. Data offload is driven by rules, which are configured per volume. An offload rule can be based on the size of the file ( s ), the owner ( u , g , or p ) of the file, and/or the file modification timestamp ( m ). You can apply one rule per volume. When a rule is associated with a volume, the rule is first applied to the files in the tiering-enabled volume. When applied to the files in the tiering-enabled volume, the offload is also triggered for all files in the snapshot chain when the criteria in the rule are met. If the file does not exist in the tiering-enabled volume, the rule is applied to the latest state of the file in the snapshot chain. If the file exists in the tiering-enabled volume but has no latest state, or if the file was deleted in the tiering-enabled volume, offload does not happen. Rules can be defined using a combination of the following: u Username or user ID, as configured in the OS registry (such as the /etc/passwd file, LDAP, etc.), of a specific user. Usage: u: g Group name or group ID, as configured in the OS registry (such as the /etc/group file, LDAP, etc.), of a specific group. Usage: g: a ( atime ) Time (in seconds or days) since the files were last accessed. The number of seconds can be specified by appending s to the value and the number of days can be specified by appending d to the value. Usage: \"a:s\" \u2014 specifies atime in seconds \"a:d\" \u2014 specifies atime in days NOTE: If the system time on CLDB and file server nodes are different, the atime rule for offloading data may not work as intended. This tier rule is matched, and files are offloaded, when all of the following conditions are met: atime tracking is enabled at the volume level. The time since atime tracking was enabled on the volume is more than the time specified in the rule. The duration since the file was last accessed is more than the time specified in the rule. For example, assume that the atime feature is enabled on the volume and that the time in the rule is set to a:300s . Based on this rule, all files that have not been accessed for 300 seconds are offloaded. However, this rule is valid only if the time since atime tracking was enabled is more than 300 seconds. The volume-level parameter atimeTrackingStartTime denotes the start time of atime . For more information, see Tuning Last Access Time . m ( mtime ) Time (in seconds or days) since the files were last modified. The number of seconds can be specified by appending s to the value and the number of days can be specified by appending d to the value. Usage: \"m:s\" \u2014 specifies mtime in seconds \"m:d\" \u2014 specifies mtime in days All files that have not been modified for the specified amount of time are offloaded. NOTE: If the system time on CLDB and file server nodes are different, the mtime rule for offloading data may not work as intended. s The size of the file in bytes, kilobytes, megabytes, or gigabytes. The size of the file can be specified by appending one of the following to the value: b for bytes, k for kilobytes, m for megabytes, or g for gigabytes. Usage: \"s:b\" \u2014 specifies file size in bytes \"s:k\" \u2014 specifies file size in KB \"s:m\" \u2014 specifies file size in MB \"s:g\" \u2014 specifies file size in GB All files whose size exceeds the specified size are offloaded. Or, use the following: p (Default) Specifies all files. Specifies that this operation is applicable to all the files without restriction. This cannot be combined with any other operator. \"\" Indicates none of the files. Specifies that this operation cannot be performed on any of the files. Use the following to string multiple criteria for offload: & AND operation to combine multiple expressions as the criteria for the rule. | OR operation to indicate either of the expressions as the criteria for the rule. () Delimiters for subexpressions.
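As an illustration of this syntax (a hypothetical rule, not taken from this page): the expression (u:jdoe|g:analysts)&s:1g&m:30d offloads files that are larger than 1 GB, are owned by user jdoe or group analysts, and have not been modified for 30 days.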
Creating a Storage Policy Editing a Storage Policy Deleting Storage Policy Delete storage policy.", +    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/managing_storage_policies.html", +    "title": "Administering Storage Policies" +  }, +  { +    "content": "\nCreating a Storage Policy Prerequisites You must be a fabric manager to perform this task. About this task The expression for a storage policy can comprise the following parameters: u denotes user. g denotes group. s denotes size. Append the value with the unit - b, k, m, or g for bytes, KB, MB, or GB respectively. m denotes last modified time. Append the value with the unit - s or d for seconds or days respectively. a denotes last accessed time. Append the value with the unit - s or d for seconds or days respectively.
Use & to add an AND condition and | to add an OR condition, to combine multiple parameter-value pairs. Example: To offload data to the cold tier for user jdoe , when the file size reaches 100 MB , and the last accessed time is 7 days , the expression is u:jdoe&s:100m&a:7d Follow the steps given below to create a storage policy. Procedure Log on to the Data Fabric UI . Select the Fabric manager option from the dropdown on the Home page. Click Fabric Administration on the Home page. From the fabrics dropdown, select the fabric for which you wish to add the storage policy. Scroll down to the Storage policies card. NOTE: Alternatively, you can click Global namespace , click the fabric link in the table view, and navigate to the Storage policies tab for the fabric. Click Create storage policy . Enter the Name for the storage policy. Enter the expression for the storage policy. Click Create . Results The storage policy is created successfully when you enter valid values and units for the parameters specified in the policy.", +    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/add_storage_policy.html", +    "title": "Creating a Storage Policy" +  }, +  { +    "content": "\n
Editing a Storage Policy About this task You can modify the basic rule to: Add or remove users and/or groups. Change the name of the users and/or groups. Change the number of days since the file was last modified for users and/or groups. If you switch from a basic rule to an advanced rule, all expressions from the basic rule are carried over to the advanced rule. You can modify an advanced rule using a combination of the following expressions: u Username or user ID, as configured in the OS registry (such as the /etc/passwd file, LDAP, etc.), of a specific user. Usage: u: g Group name or group ID, as configured in the OS registry (such as the /etc/group file, LDAP, etc.), of a specific group. Usage: g: a ( atime ) Time (in seconds or days) since the files were last accessed. The number of seconds can be specified by appending s to the value and the number of days can be specified by appending d to the value. Usage: \"a:s\" \u2014 specifies atime in seconds \"a:d\" \u2014 specifies atime in days NOTE: If the system time on CLDB and file server nodes are different, the atime rule for offloading data may not work as intended. This tier rule is matched, and files are offloaded, when all of the following conditions are met: atime tracking is enabled at the volume level. The time since atime tracking was enabled on the volume is more than the time specified in the rule. The duration since the file was last accessed is more than the time specified in the rule. For example, assume that the atime feature is enabled on the volume and that the time in the rule is set to a:300s . Based on this rule, all files that have not been accessed for 300 seconds are offloaded. However, this rule is valid only if the time since atime tracking was enabled is more than 300 seconds. The volume-level parameter atimeTrackingStartTime denotes the start time of atime . For more information, see Tuning Last Access Time . m ( mtime ) Time (in seconds or days) since the files were last modified. The number of seconds can be specified by appending s to the value and the number of days can be specified by appending d to the value. Usage: \"m:s\" \u2014 specifies mtime in seconds \"m:d\" \u2014 specifies mtime in days All files that have not been modified for the specified amount of time are offloaded. NOTE: If the system time on CLDB and file server nodes are different, the mtime rule for offloading data may not work as intended. s The size of the file in bytes, kilobytes, megabytes, or gigabytes. The size of the file can be specified by appending one of the following to the value: b for bytes, k for kilobytes, m for megabytes, or g for gigabytes. Usage: \"s:b\" \u2014 specifies file size in bytes \"s:k\" \u2014 specifies file size in KB \"s:m\" \u2014 specifies file size in MB \"s:g\" \u2014 specifies file size in GB All files whose size exceeds the specified size are offloaded. Or, use the following: p (Default) Specifies all files. Specifies that this operation is applicable to all the files without restriction. This cannot be combined with any other operator. \"\" Indicates none of the files. Specifies that this operation cannot be performed on any of the files. Use the following to string multiple criteria for offload: & AND operation to combine multiple expressions as the criteria for the rule. | OR operation to indicate either of the expressions as the criteria for the rule. () Delimiters for subexpressions. You cannot switch from an advanced rule that includes the following to a basic rule, because the following are not supported in a basic rule: p \u2014 All the files s \u2014 The size of the file & \u2014 The AND operation used for specifying multiple users ( u ), groups ( g ), or criteria | \u2014 The OR operation used with s or m \"\" \u2014 None of the files () \u2014 Subexpressions NOTE: The basic rule must contain mtime ( m ). It can also include one or more users or groups separated by the OR operation ( | ).
Follow the steps given below to edit a storage policy. Procedure Log on to the Data Fabric UI . Select the Fabric manager option from the dropdown on the Home page. Click Fabric Administration on the Home page. From the fabrics dropdown, select the fabric for which you wish to edit the storage policy. Scroll down to the Storage policies card. NOTE: Alternatively, you can click Global namespace , click the fabric link in the table view, and navigate to the Storage policies tab for the fabric. Click the ellipsis under Actions for the storage policy you wish to edit. Click Edit . Make the required changes. Click Save . Results The changes to the storage policy are saved successfully.", +    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/edit_storage_policy.html", +    "title": "Editing a Storage Policy" +  }, +  { +    "content": "\nDeleting Storage Policy Delete a storage policy. Prerequisites The storage policy must not be associated with a volume. A storage policy that is associated with a volume cannot be deleted. About this task You can delete a storage policy that is no longer required and has no association with any volume on any fabric. Follow the steps given below to delete a storage policy. Procedure Log on to the Data Fabric UI . Select the Fabric manager option from the dropdown on the Home page. Click Fabric Administration on the Home page. From the fabrics dropdown, select the fabric for which you wish to delete a storage policy.
Scroll down to the Storage policies card. NOTE: Alternatively, you can click Global namespace , click the fabric link in the table view, and navigate to the Storage policies tab for the fabric. Click the ellipsis under Actions for the storage policy you wish to delete. Click Delete . Confirm the deletion. Results The storage policy is deleted.", +    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/delete_storage_policy.html", +    "title": "Deleting Storage Policy" +  }, +  { +    "content": "\n
Administering Remote Targets Data Fabric provides rule-based automated tiering functionality that allows you to seamlessly integrate with third-party cloud object storage as an additional storage tier in the data fabric cluster, to store file data that is rarely accessed or archived. Such data is referred to as cold data. A cold tier is referred to as a remote target on the Data Fabric UI. The remote storage where cold data can be offloaded is called a remote target. A remote target has a bucket on the third-party cloud store where volume data is offloaded based on the policy configured by the fabric manager. Volume data in 64KB data chunks is packed into 8MB objects and offloaded to the bucket on the tier, and the corresponding volume metadata is stored in a visible tier-volume as HPE Ezmeral Data Fabric Database tables on the data fabric cluster. During writes and reads, volume data is recalled to the data fabric cluster, if necessary. Data written to the volume is periodically moved to the remote target, releasing the disk space on the filesystem. You can associate a volume with a remote target. For cold data, you can offload your cluster data to public, private, and hybrid clouds. You can offload data to remote clouds from vendors such as Amazon AWS, Google Cloud Platform, Microsoft Azure, IBM Cleversafe, Hitachi HCP, and MinIO. You can tap into cloud-scale capacity for cold data. NOTE: Data Fabric supports tiering for only file and volume data; tiering of tables and streams is not supported. Creating a Remote Target Create a remote target to offload cold data. Editing Remote Target Credentials Edit credentials for a remote target. Deleting a Remote Target Delete a remote target.", +    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/managing_remote_targets.html", +    "title": "Administering Remote Targets" +  }, +  { +    "content": "\n
Creating a Remote Target Create a remote target to offload cold data. Prerequisites You must be a fabric manager to perform this task. About this task You can create one or more remote targets to which cold data can be offloaded. Follow the steps given below to create a remote target. Procedure Log on to the Data Fabric UI . Select the Fabric manager option from the dropdown on the Home page. Click Fabric Administration on the Home page. From the fabrics dropdown, select the fabric for which you wish to add a remote target. Scroll down to the Remote targets card. NOTE: Alternatively, you can click Global namespace , click the fabric link in the table view, and navigate to the Remote targets tab for the fabric. Click Create remote target . Enter the Name for the remote target.
Select a cloud provider from the Vendor dropdown. Enter the URL . Select the Bucket . Enter the Region . Enter the Access key . Enter the Secret key . Click Create . Results The remote target is created successfully. The remote target can be used as a cold tier for data tiering. You can associate a volume with the remote target to offload the cold data from the volume onto the remote target.
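As an illustration (hypothetical values, not from this page): for an Amazon AWS target you might enter the URL https://s3.us-east-1.amazonaws.com , select a bucket such as df-cold-tier , enter the region us-east-1 , and supply the access key and secret key of an account that can write to that bucket.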
", +    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/add_remote_target.html", +    "title": "Creating a Remote Target" +  }, +  { +    "content": "\nEditing Remote Target Credentials Edit credentials for a remote target. Prerequisites You must be a fabric manager to perform this operation. About this task You can edit the credentials for a remote target. Follow the steps given below to edit remote target credentials. Procedure Log on to the Data Fabric UI . Select the Fabric manager option from the dropdown on the Home page. Click Fabric Administration . From the fabrics dropdown, select the fabric for which the remote target has been created. Scroll down to the Remote targets card. NOTE: Alternatively, you can click Global namespace , click the fabric link in the table view, and navigate to the Remote targets tab for the fabric. Click the ellipsis under Actions for the remote target whose credentials you wish to change. Click Edit Credentials . Modify the Access key , the Secret key , or both, as required. Click Save . Results The updated credentials are saved for the remote target.", +    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/editing_remote_target_credentials.html", +    "title": "Editing Remote Target Credentials" +  }, +  { +    "content": "\n
Deleting a Remote Target Delete a remote target. Prerequisites You must be a fabric manager or an infrastructure admin to perform this operation. The remote target to delete must not be associated with a volume. About this task You can delete a remote target that is not in use. Before you delete a remote target, ensure that the data on the remote target is backed up. Follow the steps given below to delete a remote target. Procedure Log on to the Data Fabric UI . Select the Fabric manager option from the dropdown on the Home page. Click Fabric Administration on the Home page. From the fabrics dropdown, select the fabric for which the remote target has been created. Scroll down to the Remote targets card. NOTE: Alternatively, you can click Global namespace , click the fabric link in the table view, and navigate to the Remote targets tab for the fabric. Click the ellipsis under Actions for the remote target you wish to delete. Click Delete . Confirm the deletion. Results The remote target is deleted. The data on the cold tier is inaccessible. The remote target is no longer available for data tiering.
", +    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/delete_remote_target.html", +    "title": "Deleting a Remote Target" +  }, +  { +    "content": "\n
Administering Schedules Introduction to schedules. A schedule is a group of rules that specify recurring points in time at which certain actions are to occur. You can use schedules to automate the creation of snapshots and mirrors and the offload of volume data to a storage tier; after you create a schedule, it appears as a choice in the scheduling menu when you are creating or editing a volume. When you specify a snapshot schedule on a mirror volume, it specifies how often to take a snapshot of the mirror volume. This snapshot schedule is distinct from the snapshot schedule for the standard volume. A snapshot schedule for a promotable mirror volume has two purposes: It specifies how often to take a snapshot of the mirror volume for the purpose of preserving the state of the mirror before a subsequent mirror operation. This way, if corrupt data is copied from the source volume's snapshot into the mirror volume, the mirror contents can be rolled back to the snapshot. If the promotable mirror volume is promoted to a read-write volume, the snapshot schedule specified for the mirror is used for the promoted read-write volume. Once a mirror volume is promoted to a read-write volume, the mirror schedule is disabled. A mirror schedule specifies how frequently the mirror volume is synchronized with the source volume. In case of a disaster (or any type of data loss on a read-write source volume), the data can be recovered from the mirror volume, but any data written to the source volume since the last successful mirror operation will not be on the mirror volume. Therefore, you should set the mirror schedule such that it meets your RPO (Recovery Point Objective).
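For example, if the mirror schedule synchronizes the mirror volume every 15 minutes, at most the last 15 minutes of writes to the source volume can be lost, so such a schedule is suitable only for an RPO of 15 minutes or more.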
A tier offload schedule specifies how frequently data in the volume on the fabric is offloaded to the tiered storage. Use this setting to automatically offload data to the storage tier. Creating a Schedule Add a schedule for data tiering. Editing a Schedule Edit an existing schedule. Scheduling Volume Data Tiering Viewing Schedules View a list of schedules. Deleting a Schedule Delete a schedule.", +    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/managing_schedules.html", +    "title": "Administering Schedules" +  }, +  { +    "content": "\nCreating a Schedule Add a schedule for data tiering. Prerequisites You must be a fabric manager to perform this task. About this task A schedule is a group of rules that specify recurring points in time at which certain actions are to occur. You can use schedules to automate the creation of snapshots and mirrors. A schedule can be attached to a volume. A schedule can have one or more rules. A rule is made up of three elements: The frequency of the schedule, such as yearly, daily, weekly, or a specific time interval in minutes. The time at which the schedule is to run. The time period for which data is to be retained.
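As an illustration (hypothetical values): a rule might run the schedule weekly, trigger it at 02:00 on Sunday, and retain the resulting data for 12 weeks.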
Follow the steps given below to create a schedule. Procedure Log on to the Data Fabric UI . Select the Fabric manager option from the dropdown on the Home page. Click Fabric Administration . From the fabrics dropdown, select the fabric for which you wish to create a schedule. Scroll down to the Schedules card. Click Create schedule . Enter the Name for the schedule. Configure the rule. Select the frequency and time to trigger the schedule, and the data retention period. Click Create . Results The schedule is created with the specified rule or rules.", +    "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/adding_a_schedule.html", +    "title": "Creating a Schedule" +  }, +  { +    "content": "\n
+ { + "content": "\nEditing a Schedule Edit an existing schedule. Prerequisites You must be a fabric manager to edit a schedule. About this task You can make changes to an existing schedule to incorporate any changes you wish to make. Follow the steps given below to edit a schedule. Procedure Log on to the Data Fabric UI. Select the Fabric manager option from the dropdown on the Home page. Select from the fabrics dropdown the fabric for which the schedule has been created. Click Fabric Administration. Click the pencil icon for the schedule to edit. Make the necessary changes and click Save. Results The changes to the schedule are saved and the new schedule is applied to the data on the volumes to which the schedule applies. (Topic last modified: 2023-11-01)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/edit_a_schedule.html", + "title": "Editing a Schedule" + },
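The edit flow has a command-line counterpart as well. A hedged sketch, assuming maprcli is available; the schedule id and the weekly rule fields are illustrative and should be checked against your release.

```
# Sketch: find the schedule's numeric id, then replace its rules.
maprcli schedule list            # note the id of the schedule to edit
maprcli schedule modify -id 5 \
  -rules '[{"frequency":"weekly","date":"sun","time":2,"retain":"14d"}]'
```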
+ { + "content": "\nScheduling Volume Data Tiering About this task After creating a schedule, you can associate it with a tiering-enabled volume when you create or modify the volume. If a schedule for offloading data is associated with the volume, data is offloaded automatically as scheduled, based on the rules associated with the volume for offloading data. For volumes enabled for cold tiering, you must assign a schedule to automatically offload data; if you do not assign a schedule, data is not offloaded automatically and you must manually run the offload command to offload data. See Manually Offloading Data to a Cold Tier for details on manually offloading data. Follow the steps given below to schedule tiering of data on a volume. Procedure Log on to the Data Fabric UI. Click the Table view icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume with which you wish to associate a data tiering schedule. Click the volume name seen under Resource name. Navigate to the Settings tab. Under Schedules, click the pencil icon seen next to Object tiering. Select a suitable schedule option for the volume. Click Select. Results The selected schedule is applied to the volume and data tiering is done on the designated cold tier per schedule.
(Topic last modified: 2023-08-01)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/scheduling_volume_data_tiering.html", + "title": "Scheduling Volume Data Tiering" + },
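As the topic above notes, a volume without an assigned tiering schedule must be offloaded manually. A minimal sketch of that manual path, assuming maprcli access; "project-vol" is an illustrative volume name.

```
# Sketch: manually offload a tiering-enabled volume to its cold tier,
# then inspect the volume to check tiering state (field names vary by release).
maprcli volume offload -name project-vol
maprcli volume info -name project-vol -json
```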
+ { + "content": "\nViewing Schedules View a list of schedules. Prerequisites You must be an infrastructure admin or a fabric manager to view the list of schedules. About this task You can view a list of all existing schedules to understand what the various schedules are. A fabric manager can then decide whether the required schedule already exists or new schedules must be created. Follow the steps given below to view schedules. Procedure Log on to the Data Fabric UI. Select the Fabric manager option or the Infrastructure admin experience option from the dropdown on the Home page. Click Fabric Administration. Select from the fabrics dropdown the fabric for which you wish to view the schedules. Scroll down to the Schedules card. Results You are able to see a list of existing schedules. (Topic last modified: 2023-11-01)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_schedules.html", + "title": "Viewing Schedules" + },
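For completeness, the same schedule listing is typically available from the command line; a one-line sketch, assuming maprcli access.

```
# Sketch: list all existing schedules with their ids, names, and rules.
maprcli schedule list -json
```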
+ { + "content": "\nDeleting a Schedule Delete a schedule. Prerequisites You must be a fabric manager to delete a schedule. About this task You can delete a schedule that is not associated with a volume. TIP: Attach another schedule to a volume to overwrite the existing schedule (the schedule to delete) attached to the volume. Once another schedule is attached, you can delete the old schedule. Follow the steps given below to delete a schedule. Procedure Log on to the Data Fabric UI. Select the Fabric manager option from the dropdown on the Home page. Click Fabric Administration. Select from the fabrics dropdown the fabric for which you wish to delete the schedule. Click the bin icon for the schedule to delete, and confirm the deletion. Results The schedule is deleted. (Topic last modified: 2023-11-01)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/deleting_a_schedule.html", + "title": "Deleting a Schedule" + },
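A command-line sketch of the same deletion, assuming maprcli access; the id is illustrative. As the TIP above says, a schedule still attached to a volume should first be replaced by another schedule.

```
# Sketch: delete a schedule by id once no volume references it.
maprcli schedule remove -id 5
```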
+ { + "content": "\nMirroring Synopsis of mirrors and the mirroring process. Creating a mirror volume is similar to creating a normal read/write volume. However, when you create a mirror volume, you must specify a source volume from which the mirror retrieves content. This retrieval is called the mirroring operation. Like a normal volume, a mirror volume has a configurable replication factor. Only one copy of the data is transmitted from the source volume to the mirror volume. HPE Ezmeral Data Fabric volumes can only be mirrored and not replicated; however, the source and mirror volumes handle their own internal HPE Ezmeral Data Fabric file system replication (which is based on the replication factor) independently of each other. Volume mirroring from a lower HPE Ezmeral Data Fabric version to a higher version is supported. Volume mirroring from a higher version to a lower version is not supported. Mirroring Process The HPE Ezmeral Data Fabric system creates a temporary snapshot of the source volume at the start of a mirroring operation. The mirroring process reads content from the snapshot into the mirror volume. The source volume remains available for read and write operations during the mirroring process. If the mirroring operation is schedule-based, the snapshot expires according to the value of the schedule's Retain For parameter.
Snapshots created during manual mirroring persist until they are deleted manually. The mirroring process transmits only the differences between the source volume and the mirror. The initial mirroring operation copies the entire source volume, but subsequent mirroring operations can be extremely fast. (Topic last modified: 2023-07-28)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/mirroring.html", + "title": "Mirroring" + },
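The overview above says a mirror volume is created like a read/write volume plus a source specification. A hedged sketch of that creation, assuming maprcli access; the volume, cluster, and mount-path names are illustrative.

```
# Sketch: create a mirror volume that pulls from the read/write volume
# "data-vol" on the cluster "my.cluster.com", mounted at an illustrative path.
maprcli volume create \
  -name data-vol-mirror \
  -type mirror \
  -source data-vol@my.cluster.com \
  -path /mirrors/data-vol
```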
+ { + "content": "\nLocal Mirroring A local mirror volume is a mirror volume whose source is on the same cluster. Local mirror volumes are useful for load balancing or for providing a read-only copy of a data set. You can locate your local mirror volumes on specific servers or on racks with particularly high bandwidth, mounted in a public directory separate from the source volume. The most frequently accessed volumes in a cluster are likely to be the root volume and its immediate children. To load-balance read operations on these volumes, mirror the root volume (typically mapr.cluster.root, which is mounted at /). By mirroring these volumes, you can serve read requests from the mirrors and distribute load across the nodes. Less-frequently accessed volumes that are lower in the hierarchy do not need mirror volumes. Since the mount paths for those volumes are not mirrored throughout, those volumes remain writable. If you create a local mirror of the root volume, root (/) points to the mirror volume, so root is read-only. For a read-write copy of root (/), you must use the special path /.rw. (Topic last modified: 2023-07-28)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/local_mirroring.html", + "title": "Local Mirroring" + },
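To make the read-only root behavior concrete: the sketch below assumes the cluster file system is mounted at /mapr/my.cluster.com (an illustrative mount point) and that the root volume is mirrored, so writes succeed only through the special /.rw path described above.

```
# Sketch: with a mirrored root volume, / is read-only but /.rw is writable.
touch /mapr/my.cluster.com/scratch.txt       # expected to fail: read-only mirror
touch /mapr/my.cluster.com/.rw/scratch.txt   # expected to succeed: read-write root
```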
+ { + "content": "\nRemote Mirroring A remote mirror volume is a mirror volume with a source in another cluster. You can use remote mirrors for offsite backup, for data transfer to remote facilities, and for load and latency balancing for large websites. By mirroring the cluster's root volume and all other volumes in the cluster, you can create an entire mirrored cluster that keeps in sync with the source cluster. Backup mirrors for disaster recovery can be located on physical media outside the cluster or in a remote cluster. If disaster strikes the source cluster, you can check the time of the last successful synchronization to determine the freshness of the backup. Once data volumes are created in a primary data center, the data-fabric administrator creates mirror volumes in a remote secondary data center. (Topic last modified: 2023-07-28)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/remote_mirroring.html", + "title": "Remote Mirroring" + },
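The remote case differs from the local sketch shown earlier only in where the source lives. A hedged sketch, assuming maprcli access on the secondary cluster and that cross-cluster connectivity between the two fabrics is already configured; all names are illustrative.

```
# Sketch: on the secondary (DR) cluster, create a mirror volume whose source
# volume resides on the primary cluster.
maprcli volume create \
  -name data-vol-dr \
  -type mirror \
  -source data-vol@primary.cluster.com
```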
+ { + "content": "\nStarting Volume Mirroring Start mirroring of data on a mirror volume. Prerequisites You must have created a mirror volume on which data from the associated source volume can be mirrored. Data must be present on the source volume. About this task Data from the associated source volume can be mirrored to a mirror volume. Follow the steps given below to start mirroring of data onto a mirror volume. Procedure Log on to the Data Fabric UI. Under the default Fabric user experience, click the Table view icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume. Click the volume name seen under Resource Name. Navigate to the Mirrors tab. Click the ellipsis under Actions for the required volume. Click the Start mirroring option, and click Start on the Start mirroring message box to confirm that you wish to start mirroring. Results Mirroring of data from the source volume is triggered. The status under the Mirroring column on the Mirrors tab for the volume changes to On. The percentage of data mirrored from the source volume is displayed for the volume under the Mirrored column on the Mirrors tab.
(Topic last modified: 2023-10-18)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/starting_data_mirroring.html", + "title": "Starting Volume Mirroring" + },
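A one-line command-line equivalent (assumed) of the Start mirroring action above; the mirror volume name is illustrative.

```
# Sketch: trigger a mirroring operation on the mirror volume.
maprcli volume mirror start -name data-vol-mirror
```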
+ { + "content": "\nStopping Volume Mirroring Stop mirroring of data that is in progress on a mirror volume. Prerequisites Data mirroring must be in progress on the mirror volume. About this task You can stop mirroring of data that is in progress from the associated source volume onto a mirror volume. Follow the steps given below to stop mirroring of data onto a mirror volume. Procedure Log on to the Data Fabric UI. Under the default Fabric user experience, click the Table View icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume. Click the volume name seen under Resource Name. Navigate to the Mirrors tab. Click the ellipsis under Actions for the volume being mirrored. Click the Stop mirroring option. Results Mirroring of data that is in progress from the source volume onto the mirror volume is stopped. The status under the Mirroring column on the Mirrors tab for the volume changes to Off. (Topic last modified: 2023-10-18)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/stopping_data_mirroring.html", + "title": "Stopping Volume Mirroring" + },
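And the matching stop action, sketched under the same assumptions; the volume name is illustrative.

```
# Sketch: stop an in-progress mirroring operation on the mirror volume.
maprcli volume mirror stop -name data-vol-mirror
```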
+ { + "content": "\nScheduling Volume Mirroring Prerequisites You must have the privilege to schedule mirroring. The volume for which you are scheduling mirroring must be a mirror volume. About this task When you choose the mirror schedule, consider the amount of data on the volume and the load on the cluster. Remember that the mirroring frequency must allow enough time for one mirror operation to complete before the next scheduled mirror operation starts. In addition, if you have a cascaded mirror setup (where A mirrors to B, which mirrors to C), you cannot set a mirror schedule for C that starts before B finishes mirroring from A. WARNING: Avoid setting a mirror schedule that runs more often than every 30 minutes. Follow the steps given below to schedule mirroring on a volume. Procedure Log on to the Data Fabric UI. Under the default Fabric user experience, click the Table view icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume with which you wish to associate a mirroring schedule. Click the volume name seen under Resource name. Navigate to the Settings tab. Under Schedules, click the pencil icon seen next to Mirror. Select a suitable schedule option for the volume. Click Select. Results The selected schedule is applied to the volume and data mirroring is performed per the schedule. (Topic last modified: 2023-10-18)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/scheduling_volume_mirroring.html", + "title": "Scheduling Volume Mirroring" + },
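Attaching a mirror schedule can typically be done on the volume itself. A hedged sketch, assuming maprcli access; the volume name and schedule id are illustrative. Per the warning above, pick a schedule that fires no more often than every 30 minutes.

```
# Sketch: associate an existing schedule (by id) with a mirror volume so
# mirroring runs automatically.
maprcli volume modify -name data-vol-mirror -mirrorschedule 5
```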
+ { + "content": "\nAuditing Fabric and Fabric Data Auditing in Data Fabric You can enable auditing of cluster administration and data-access operations using the Data Fabric UI. Levels of Auditing There are two levels of auditing: auditing of fabric-level administration operations, and auditing of data-access operations on the fabric. (Topic last modified: 2023-05-04)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/auditing_fabric_and_fabric_data.html", + "title": "Auditing Fabric and Fabric Data" + },
+ { + "content": "\nEnabling/Disabling Fabric Auditing Enable or disable fabric auditing. About this task Follow the steps given below to enable or disable auditing of fabric administration operations. Procedure Log on to the Data Fabric UI with Admin or Fabric Manager credentials. Under the default Fabric user experience, click the Table view icon on the Resources card. Click the link for the fabric in the Fabrics list for which you wish to enable auditing. Navigate to the Settings tab on the fabrics page. Under Fabric Settings, click the Edit icon. Toggle Cluster Auditing to enable auditing on the fabric. Results Auditing of fabric administration operations is enabled on the fabric. After auditing is enabled, audit log entries are generated when fabric administration operations are performed. (Topic last modified: 2023-10-18)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/configuring_data_auditing.html", + "title": "Enabling/Disabling Fabric Auditing" + },
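A command-line sketch of the same toggle, assuming maprcli access with fabric manager credentials.

```
# Sketch: enable (or disable) auditing of fabric-level administration operations.
maprcli audit cluster -enabled true
maprcli audit cluster -enabled false   # to disable again
```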
+ { + "content": "\nConfiguring Auditing for Data Access Operation Enable or disable auditing for data-access operations on a fabric. About this task Data-access auditing can be enabled via the Data Fabric UI. Follow the steps given below to audit data-access operations on a fabric. Procedure Log on to the Data Fabric UI with Admin or Fabric Manager credentials. Click the Table View icon on the Resources card. Click the link for the fabric in the Fabrics list for which you wish to enable auditing. Navigate to the Settings tab on the fabrics page. Under Fabric Settings, click the Edit icon. Select the On option for Configure auditing of data-access operations. Enter the maximum size for the audit log. Enter the retention period. Click Update. Results Auditing for data-access operations is enabled on the fabric with the specified audit log size and retention period. (Topic last modified: 2023-06-28)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/enabling_disabling_data_auditing_on_fabric.html", + "title": "Configuring Auditing for Data Access Operation" + },
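The data-access variant takes the same two extra fields the UI asks for. A hedged sketch, assuming maprcli access; the size and retention values are illustrative.

```
# Sketch: enable auditing of data-access operations with a 32 GB maximum
# audit-log size and a 30-day retention period.
maprcli audit data -enabled true -maxsize 32 -retention 30
```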
+ { + "content": "\nAdministering Security Policies Add, edit, delete, and manage the state of security policies. A security policy is an access control mechanism that can be applied to data objects on a fabric. Once a security policy is applied, it governs how a user can access data objects on the volume to which the security policy is applied. A security policy can be associated with a volume. Security Policy Life Cycle The state of a security policy is interpreted as a combination of two parameters: allow tagging and access control. The following describes the accepted values of each parameter. allow tagging (default value: false). false: Disables tagging; users cannot apply the security policy to data objects. This is the default setting when the fabric manager creates a security policy; the fabric manager can also specify the setting explicitly when creating the security policy. When a security policy is active (allow tagging=true) but needs to be deprecated, modify the policy and set allow tagging=false. This prevents users from tagging any other data objects with the policy; note that the system continues to enforce the security controls set in the security policy for data objects that were already tagged with it. true: Enables tagging; users can apply the security policy to data objects.
When creating or modifying a security policy, a fabric manager can set allow tagging to true, for example to test the security settings in the policy or to use tagging tools to discover data content and tag the data. To enable a deprecated security policy, set allow tagging to true.
access control = Disarmed (default): Unless the fabric manager changes the setting when creating the security policy, this is the default. The system does not enforce the access permissions set in the security policy during data operations on the data objects tagged with it.
access control = Armed: The system enforces the permissions set in the security policy during data operations on the data objects tagged with it. A fabric manager can set access control to Armed when creating or modifying a security policy. To enforce the access permissions set in a deprecated security policy, the fabric manager can set access control to Armed; the system then continues to enforce those permissions for all data operations on the tagged data objects.
access control = Denied: Denies all access to data objects tagged with the security policy.
You can change the state of a security policy through the allow tagging and access control parameters to move it through a life cycle, from new to retired. The stages of the life cycle are:
new (default): The state upon security policy creation. Users cannot tag data objects with the security policy, and the system does not enforce the access permissions set in the policy.
in use: Users can tag data objects with the security policy, and the system enforces all security controls set in the policy during data operations on tagged data objects. Security controls set in the policy can include access permissions, auditing, and wire-level encryption.
deprecated: Users can no longer tag data objects with the policy, but the system still enforces the security controls set in the policy for all data operations on data objects already tagged with it.
retired: Users cannot tag data objects with the policy, and the system denies all data operations on data objects tagged with it.
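As a sketch of how these state changes might be driven from the command line, assuming a maprcli security policy modify subcommand with parameter names mirroring the create command (both the subcommand and its parameters are assumptions; verify against the maprcli reference):

# Deprecate an active policy: block new tagging while existing tags stay enforced.
maprcli security policy modify -name mypolicy -allowtagging false

# Retire the policy: deny all access to data objects still tagged with it.
maprcli security policy modify -name mypolicy -accesscontrol Denied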
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/managing_security_policies.html", + "title": "Administering Security Policies" + }, + { + "content": "\nAbout Security Policy Domain Describes a security policy domain.
A security policy domain is a group of fabrics that directly or indirectly share data and use the same security policies to control access to that data. A security policy domain consists of one master fabric and zero or more member security-policy fabrics that together create a global security policy namespace. A global policy master is a prerequisite for the creation of security policies: it is the fabric on which security policies can be created. You can create and modify security policies only on the fabric that is designated as the global policy master. When you create or update security policies, the policy server updates the mapr.pbs.base volume with the security policy metadata; the security policies are then mirrored to the other member fabrics in the global namespace. By default, the first fabric (the primary fabric) that you create in the global namespace is designated as the global policy master, so you do not need to assign an alternate global policy master explicitly unless the primary fabric goes down. Each fabric to which a security policy is applied operates independently and therefore does not require network connectivity to the global policy master to enforce policies. A security policy server in each fabric enforces the policies and manages the security policy metadata in an internal volume named mapr.pbs.base. See Security Policy Implementation Workflow for details on how to apply security policies to fabric volumes on Data Fabric.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/about_security_policy_domain.html", + "title": "About Security Policy Domain" + }, + { + "content": "\nSecurity Policy Implementation Workflow Describes the security policy workflow, in general, and the steps in implementing a security policy.
All member fabrics in the global namespace (for example, Member Fabric 1 through Member Fabric 4) get the policy metadata from the global policy master: the security policies are mirrored from the global policy master to all member fabrics. 1 - Create a Security Policy Domain: By default, the primary fabric is set as the global policy master, and all secondary fabrics are member fabrics in the global namespace with respect to security policies. You must create security policies on the primary fabric, because security policies are propagated to secondary (member) fabrics by mirroring. The mirroring takes place every 15 minutes. IMPORTANT: If the primary fabric needs to go offline or fails, set one of the member fabrics as the new primary fabric with the following command: maprcli clustergroup updateprimary -clustername Once a member fabric is converted into a primary cluster, it automatically becomes the global policy master. To identify whether a fabric is the global policy master, run the following command on the command line for the fabric: maprcli config load --keys "cldb.pbs.global.master" In the output, the value of cldb.pbs.global.master is 1 for the fabric that is designated as the global policy master; for a member fabric, the value is 0.
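Put together, a failover check-and-promote sequence might look like the following sketch (the fabric name fabric-east is a hypothetical placeholder):

# Check the current role: 1 = global policy master, 0 = member fabric.
maprcli config load --keys "cldb.pbs.global.master"

# If the primary fabric is down, promote a member fabric to primary;
# it then becomes the global policy master automatically.
maprcli clustergroup updateprimary -clustername fabric-east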
2 - Create and Update Security Policies: You can create and update security policies on the global policy master only; you cannot create or modify security policies on member fabrics. See Creating a Security Policy for instructions. The following operations are allowed and prohibited on each security-policy fabric type: Master (the fabric set as the global policy master) - allowed: Create, Modify, Export, View, Tagging; prohibited: Import. Member - allowed: Import, Export, View, Tagging; prohibited: Create, Modify. 3 - Propagate Security Policies: Once you create security policies on the global policy master, the policy metadata is automatically mirrored to the other fabrics that are members of the global namespace. Data Movement Considerations: The policy server in each security-policy fabric manages security policies and composite IDs. A composite ID is a unique, internal integer that maps to a security policy or set of security policies. The policy server stores the mapping in an internal volume named mapr.pbs.composite. When you assign a security policy to a filesystem resource, the composite ID for that security policy is stored with the resource. Storing the composite ID with the resource instead of the security policy itself optimizes storage. For example, if a policy named HIPAA maps to composite ID 200, this composite ID is stored with any file you tag with HIPAA. Security policies are shared across the security policy domain, but composite IDs are not: the same security policy has a different composite ID on each fabric. For example: fabricA - HIPAA - composite ID 200; fabricB - HIPAA - composite ID 500; fabricC - HIPAA - composite ID 800. By default, up to one million composite IDs can be created instantly, after which a throttle process takes effect. The default limit of one million composite IDs is sufficient for about one thousand security policies. Using security policies as intended should not trigger the throttle process; however, using security policies for general tagging purposes can quickly exhaust composite IDs and trigger throttling. Important Notes About Composite IDs: You cannot see or interact with composite IDs. If you copy a file from one fabric to another, only the data is copied; the policy server on the destination fabric does not recognize the composite ID associated with the file and therefore cannot enforce the access controls configured in the policy. To avoid this issue, use mirroring to synchronize data: during mirroring, security policies are propagated to the destination fabric, and the policy server there assigns new composite IDs to the security policies before data synchronization starts, so the composite ID/security policy mappings are present when data synchronizes. Do not schedule mirroring for the composite ID internal volume mapr.pbs.composite. Composite IDs are used only with filesystem resources. The database stores policies as an array of policy IDs in the key-value store; database policy IDs are unique across the global policy domain, which simplifies table replication. For example, policy IDs in JSON tables can be copied from one fabric to another. The server deals with the policy ID, not the policy name; policy IDs are evaluated and translated to the policy name on the client side.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/security_policy_implementation_workflow.html", + "title": "Security Policy Implementation Workflow" + }, + { + "content": "\nSecurity Policy Enforcement Process Describes the steps followed during security policy enforcement on volumes.
Order of Enforcement: The Data Fabric file system enforces security policies hierarchically, starting at the volume level. If the volume-level enforcement mode is set to PolicyAceAndDataAce (the default setting), the system evaluates and enforces both the ACEs directly applied to data objects AND the ACEs defined in the security policies applied to those data objects. When a user submits a data-operation request, the system evaluates and enforces the ACEs hierarchically, starting with the volume in which the data resides. For example, to perform a write operation on a file, the system first evaluates permissions on the volume in which the file resides. If at least one security policy is applied to the volume, the system evaluates the ACEs set in the security policy AND the ACEs or POSIX mode bits directly applied to the volume. Both sets of ACEs must allow the user to access the volume: if one set does not permit access, the system denies the user permission to perform the operation; if both permit access, the system checks access permissions on the file.
The system then evaluates the security policies applied to the file AND any ACEs or POSIX mode bits applied directly to the file. Both sets of ACEs must permit the user write access on the file (writefileace); if they do, the user can perform the data operation on the file, and if not, the system denies access. Note the following behaviors related to the enforcement mode setting: When set to PolicyAceOnly, the system enforces only the ACEs set in security policies, so a user can perform data operations on a data object only if the security policies associated with it allow the user access. However, if a data object is not associated with at least one security policy, the system enforces any ACEs or POSIX mode bits set directly on the data object; in that case, a user can access the data object only if those ACEs or POSIX mode bits allow it. In PolicyAceOnly and PolicyAceAndDataAce modes, if a security policy is applied to a data object but no ACEs are defined in the policy (""), the system continues to the next-level data object to evaluate permissions. Data Fabric File System Enforcement Process: The Data Fabric file system enforces security policies on data objects in the following order: volumes, then files/directories. NOTE: The system enforces directory ACEs only when determining access to the directory during directory operations. For read and write operations, directory ACEs are enforced during the path-walk operation when opening a file; if the user already has a handle (FID) to the file, the user can access the file directly with the FID, and the system ignores directory ACEs. See Managing File and Directory ACEs for details on directory ACEs. The original topic illustrates the evaluation order for each enforcement mode (PolicyAceOnly; PolicyAceAndDataAce, the default; and PolicyAceAuditAndDataAce, the permissive mode) with diagrams. NOTE: If no policy is applied at the volume or file/directory level, the system enforces DataAces (mode bits and ACEs applied directly on the data object) to protect the data. NOTE: In PolicyAceAuditAndDataAce (permissive) mode, the system does not enforce denied-access checks, but it does log information about the denied check in the audit logs.
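When debugging an unexpected access denial, it can help to inspect the ACEs and mode bits that apply to the object itself. A minimal sketch, assuming a client with the hadoop mfs utility and a hypothetical file path (what -ls reports for ACEs may vary by release; see the hadoop mfs reference):

# List the file with its Data Fabric metadata, then compare the reported
# mode bits and ACEs against the security policies applied at the volume level.
hadoop mfs -ls /mapr/myfabric/vol1/data/file1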
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/security_policy_enforcement.html", + "title": "Security Policy Enforcement Process" + }, + { + "content": "\nUnderstanding Access Control in a Security Policy The implications of permissions assigned to users and groups in a security policy.
The following types of access can be granted to all users (Public) or to specific users or groups:
Directories:
- Read: read the contents of a directory. If you do not select this permission, mode bits are used to determine read access. To read the contents of a directory that is tagged with this security policy, the user must also have read permissions on the volume, the parent directory (if any), and the file.
- Lookup: look up or list the contents in a directory. If you do not select this permission, mode bits are used to determine lookup access. To look up a file or directory that is tagged with this security policy, the user must also have read permissions on the volume and the lookup permission on the directory.
- List: list the contents of a directory. If you do not select this permission, mode bits are used to determine directory list access. To list the contents of a directory that is tagged with this security policy, the user must also have read permissions on the volume and lookup permission on all directories in the path (if any).
- Add child: add a file or subdirectory. If you do not select this permission, mode bits are used to determine permissions to create files or subdirectories. To add a child to a directory that is tagged with this security policy, the user must also have write permissions on the volume and the parent directory, add child permission on the parent directory, and read and execute permissions on all directories in the path.
- Delete child: delete a file or subdirectory. If you do not select this permission, mode bits are used to determine permissions to delete files and/or subdirectories. To delete a child of a directory that is tagged with this security policy, the user must also have write permissions on the volume, delete child permission on the parent directory, and lookup permissions on all directories in the path.
Files:
- Read: read a file. If you do not select this permission, mode bits are used to determine read access to the file. To read a file that is tagged with this security policy, the user must also have read permissions on the volume and lookup permission on all directories in the path.
- Write: write to a file. If you do not select this permission, mode bits are used to determine write access to the file. To write to a file that is tagged with this security policy, the user must also have write permissions on the volume and lookup permission on all directories in the path.
- Execute: execute a file. If you do not select this permission, mode bits are used to determine execute access to the file. To execute a file that is tagged with this security policy, the user must also have read permissions on the volume and lookup permission on all directories in the path.
For more information, see Managing File and Directory ACEs.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/understanding_access_control_in_a_security_policy.html", + "title": "Understanding Access Control in a Security Policy" + }, + { + "content": "\nManaging File and Directory ACEs Describes the implications of setting access control expressions on files and directories.
A file Access Control Expression (ACE) allows you to define access (allowlist and denylist) to files and directories for a combination of users, groups, and roles. If ACEs are not set, POSIX mode bits for the file or directory are used to grant or deny access to it. When you set ACEs, Data Fabric sets or resets the corresponding POSIX mode bits to match the permissions granted through the ACEs. If both ACEs and POSIX mode bits are set, access is granted if it is allowed through either the ACEs or the POSIX mode bits. If ACEs are not set, POSIX mode bits are used to grant access. If neither ACEs nor POSIX mode bits are set, access is denied.
File ACEs (access type: command-line option / Java API enum):
- Read a file: -readfile / READFILE
- Write to a file: -writefile / WRITEFILE
- Execute a file: -executefile / EXECUTEFILE
Directory ACEs (access type: command-line option / Java API enum):
- Read a file: -readfile / READFILE
- Write to a file: -writefile / WRITEFILE
- Execute a file: -executefile / EXECUTEFILE
- List the contents of a directory (required to write and/or execute files in the directory): -readdir / READDIR
- Look up a file in a directory (required to find, read, write, and/or execute files in the directory): -lookupdir / LOOKUPDIR
- Add a file or subdirectory: -addchild / ADDCHILD
- Delete a file or subdirectory: -deletechild / DELETECHILD
Although you can set both file and directory ACEs on directories, only the directory ACEs are used for determining access to the directory. The file ACE on a directory is used as the default ACE setting for new files under that directory. By default, when you set ACEs on a parent directory: permissions for existing files and subdirectories under that parent remain unchanged; new files under that parent inherit the file ACEs and corresponding POSIX mode bits of the parent directory, if available, and otherwise get the default ACE, the empty string (""), which indicates that no one has permissions to read, write, or execute the file (POSIX mode bits are set on the file in the traditional way); new subdirectories under the parent inherit both the directory and file ACEs and the corresponding POSIX mode bits from the parent directory. NOTE: When accessing files and directories, the ACEs on files have no effect on access to the parent directory. Workaround for the execute operation when ACEs are set on an executable file: When ACEs are set on any file, mode bits are cleared. For a binary to execute, the kernel checks whether the execute bit is set and restricts execution if it is not. To run an executable file with ACEs set on it, use one of the following workarounds: set the owner mode exec bit on the binary/shell script; set the group mode exec bit on the binary/shell script; or change the owning group of the file to the group used in the ACEs and set the group mode exec bit.
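As a concrete sketch of the command-line options above, assuming the hadoop mfs -setace command accepts these flags and that a POSIX client mount exists at /mapr (the ACE expression syntax, user, group, and path are placeholder assumptions; verify against the ACE reference):

# Allow user alice and group analysts to read the file.
hadoop mfs -setace -readfile "u:alice|g:analysts" /mapr/myfabric/vol1/report.bin

# Setting an ACE clears the mode bits, so re-set the owner execute bit if the
# file must remain executable (the first workaround listed above).
chmod u+x /mapr/myfabric/vol1/report.bin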
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/filedirace.html", + "title": "Managing File and Directory ACEs" + }, + { + "content": "\nSecurity Policy Permissions Permissions define which administrative users can create, view, and modify security policies. Administrators set the permissions on security policies through cluster-level and security policy-level ACLs.
Permission Levels: Policy-based security supports cluster-level and policy-level permissions. Cluster-level permissions control which administrators can create and view security policies in a cluster: administrators with cluster-level cp permission can create security policies, and administrators with cluster-level fc permission can view all the security policies created. Policy-level permissions control which administrators can view and modify security policies; they are set on a per-policy basis, and permissions set on one security policy do not apply to other security policies. Administrators with cluster-level permissions can set cluster-level and security policy-level permissions through any of the following tools: the Data Fabric UI, the maprcli acl set|edit commands, or the maprcli security policy create commands. IMPORTANT: Note these considerations for security-policy permissions: On a fresh cluster install, the root user and the data-fabric user (typically named mapr or hadoop on each node) have cp permission; on an upgraded cluster, only the data-fabric user has cp permission. As the cluster owner, the data-fabric user has overriding permission on security policies, including the administrative ACLs: the data-fabric user can create, view, and modify security policies regardless of the cluster-level and policy-level permissions specified. By default, administrators do not have permission to create security policies; they need the cluster-level cp (create security policy) permission, and administrators with the cluster-level a (admin) permission can grant cp permission to themselves or to other administrators. TIP: You must designate a cluster as the global policy master before you create security policies; setting a global policy master creates a global namespace for security policies (see Designating a Fabric as Global Policy Master). Any user with a valid data-fabric ticket can view security policy IDs and names, which allows non-administrative users to determine which security policies to apply to data objects. Permission Codes: Cluster-level and security policy-level permission codes, set through ACLs, grant security policy access to administrators.
An administrator (with cluster-level a (admin) and cp (create security policy) permissions) who creates a security policy has full control over it unless they specifically grant other administrators access to the policy through policy-level permissions. Cluster-Level Permission Codes: a (admin) grants administrative access to cluster ACLs; such an administrator can grant create security policy (cp) permission to themselves or other administrators, but cannot view or edit the details of any security policy created by other admins (they can only view the security policy ID and name) and needs security policy-level permissions to view or edit security policies created by other admins. cp (create security policy) is required to create security policies; administrators with a (admin) cluster-level permission can grant cp permission to themselves or other administrators. Administrators can view and edit all parts of the security policies they create, including the ACEs and permissions on those policies, and creating a security policy grants the creator the policy-level Full Control (fc), Admin (a), and Read (r) permissions on it. Administrators who create security policies can designate policy owners, who can then manage the security policies. fc (full control) grants full control over the cluster and enables all cluster-level administrative options, but cannot change the cluster-level ACLs; an fc administrator can view all security policies but cannot create them, and cannot edit the details of any security policy unless specifically granted access through policy-level permissions. Policy-Level Permission Codes: Separate read (r) and edit (fc) permissions for policy owners allow some policy owners to view policy information while others can edit it; this lets most administrators administer the system without seeing the data, and also prevents some policy owners from adding their credentials to the administrative ACLs to manipulate the data-access ACEs. Policy-level permissions are set on a per-policy basis; permissions set on one security policy do not apply to other security policies. The policy-level codes are: a (admin) can view and modify permissions on the security policy, but cannot view or modify the policy itself (only its name and ID); fc (full control) can view and edit any part of the security policy, including the data-access ACEs, but cannot view or modify permissions on the policy; r (read) can view all parts of a security policy but cannot modify any part of it.
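For example, granting the create-security-policy permission from a terminal might look like the following sketch (assuming the maprcli acl edit syntax applies to cluster ACLs; the user name jadmin is a placeholder):

# Grant cluster-level cp (create security policy) permission to user jadmin.
# Must be run by an administrator holding cluster-level a (admin) permission.
maprcli acl edit -type cluster -user jadmin:cp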
Permissions Table: The following cluster-level and policy-level permissions are needed to perform specific actions on security policies (-- indicates that no permission at that level is required): Create a security policy: cluster-level cp, policy-level --. View details of all security policies: cluster-level fc, policy-level --. View details of a security policy: cluster-level --, policy-level r. View and edit permissions on a security policy (ACLs): cluster-level --, policy-level a. View and edit the details of a security policy (ACEs, auditing, wire-level encryption): cluster-level --, policy-level fc. NOTE: Administrators who create a security policy have policy-level r, a, and fc permissions on the security policy. For more information, see Creating a Security Policy, Editing a Security Policy, Enabling a Security Policy, Assigning Multiple Security Policies to One or More Volumes, and About ACL.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/security_policy_permissions.html", + "title": "Security Policy Permissions" + }, + { + "content": "\nDesignating a Fabric as Global Policy Master Designate a fabric in the global namespace as the global policy master.
About this task: A security policy domain is a group of fabrics that share data and use the same security policies to control access to the data. A security policy domain consists of a global policy master and zero or more member fabrics that constitute a global security policy namespace. Before you can create security policies, one fabric must be set as the global policy master, and security policies must be created and managed only on that fabric. The primary fabric is auto-designated as the global policy master, so it is not normally necessary to designate a fabric as the global policy master explicitly. NOTE: Every 15 minutes, the policies created on the global policy master are mirrored to the member fabrics in the same global namespace.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/designate_fabric_as_global_policy_master.html", + "title": "Designating a Fabric as Global Policy Master" + }, + { + "content": "\nCreating a Security Policy Add a security policy on the global policy master.
Administering Tables Describes the operations you can perform related to tables for HPE Ezmeral Data Fabric . Administering Topics Administer topics for Apache Kafka Wire Protocol with HPE Ezmeral Data Fabric . Administering Volumes Administer volumes on HPE Ezmeral Data Fabric . Auditing Fabric and Fabric Data Auditing in Data Fabric Administering Security Policies Add, edit, delete, and manage state of security policies. About Security Policy Domain Describes a security policy domain. Security Policy Implementation Workflow Describes the security policy workflow, in general, and the steps in implementing a security policy. Security Policy Enforcement Process Describes the steps followed during security policy enforcement on volumes. Understanding Access Control in a Security Policy The implications of permissions assigned to users and groups in a security policy. Managing File and Directory ACEs Describes the implications of setting access control expressions on files and directories. Security Policy Permissions Permissions define which administrative users can create, view, and modify security policies. Administrators set the permissions on security policies through cluster-level and security policy-level ACLs. Designating a Fabric as Global Policy Master Designate a fabric in the global namespace as the global policy master. Creating a Security Policy Add a security policy on the global policy master. Viewing a Security Policy View security policy details. Viewing All Security Policies View all security policies on the Data Fabric UI. Editing a Security Policy Make changes to a security policy. Assigning a Security Policy to One or More Volumes Assigning Multiple Security Policies to One or More Volumes Describes how to assign multiple security policies to volumes. Unassigning One or More Security Policies from a Volume Unassign a policy from a volume to which it has been previously assigned. Disabling a Security Policy Describes how to disable a security policy. Enabling a Security Policy Describes how to enable a security policy. Working with an External NFS Server Associate an external NFS server with Data Fabric to share data across clusters in the global namespace. Working with External S3 Object Store Administering Alarms Manage alarms via the HPE Ezmeral Data Fabric UI. Monitoring Describes monitoring with OpenTelemetry for HPE Ezmeral Data Fabric . Getting Started with Iceberg Summarizes what you need to know to begin using Iceberg with HPE Ezmeral Data Fabric release 7.6.0. Creating a Security Policy Add a security policy on the global policy master. Prerequisites You must have the permission to create a security policy. About this task Security policies can be created on a fabric that is designated as the global policy\n master. See Designating a Fabric as Global Policy Master to designate a fabric as the\n global policy master. A security policy is a common set of access permissions on the Data Fabric file\n system that can be assigned to users and/or groups, or to public (all users). The following permissions can be assigned on files and directories on the Data Fabric\n file system. Read, write, execute permissions on files Read, lookup, add child directory, delete child directory on\n directories A security policy can be assigned to volumes when tagging is allowed. See Administering Security Policies for details on values for the allow tagging and access control fields for a\n security policy. 
NOTE: When you allow tagging, you can assign the security policy to a volume on the\n fabric. Procedure Log on to the Data Fabric UI . Select Fabric manager from the\n dropdown on the Home page. Click Security Administration seen on the Home page. Click Create Policy on the Global policies card. Enter the Name of the security policy. Enter the Description . Select the option for Access Control . Toggle Allow Tagging to allow or disallow tagging. Click Add access permissions to add access permissions to directories\n and files for selected users or groups. To grant permission to all users and groups, turn on the Public toggle.\n To grant permissions to specific users or groups, turn off the Public toggle, and enter a comma-separated list of users or groups. Select the permissions to be granted on directories and files to the specified\n users or groups. Click Add . Click Create . Results The security policy is created and is displayed on the Global\n Policies card for the fabric. Related maprcli Commands To implement the features described on this page, the\n Data Fabric UI relies on the following maprcli command. The\n command is provided for general reference. For more information, see maprcli Commands in This Guide . policy create NOTE: A newly created policy cannot be assigned\n immediately to a fabric resource such as a volume on a secondary fabric in the\n global namespace. A fabric user must wait for 5-10 minutes before the fabric user\n can assign the policy to a fabric resource on a secondary fabric. (Topic last modified: 2024-01-31) \u00a9Copyright 2024 Hewlett Packard Enterprise Development LP - Partners | Support | Dev-Hub | Community | Training | ALA | Privacy Policy | Glossary", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/adding_a_security_policy.html", + "title": "Creating a Security Policy" + }, + { + "content": "\nViewing a Security Policy Jump to main content Get Started Platform Administration Reference Home Administration This section describes how to administer fabric resources in the global namespace of your HPE Ezmeral Data Fabric as-a-service platform. Administering Security Policies Add, edit, delete, and manage state of security policies. Viewing a Security Policy View security policy details. HPE Ezmeral Data Fabric 7.6 Documentation Search current doc version Administration This section describes how to administer fabric resources in the global namespace of your HPE Ezmeral Data Fabric as-a-service platform. IPv6 Support in Data Fabric Describes the IPv6 support feature for Data Fabric. Administering Fabrics This section describes fabric operations that you can perform using the Data Fabric UI. Administering Users and Roles This section describes the operations you can perform related to users, groups, and roles for the HPE Ezmeral Data Fabric . Administering Buckets Describes the operations you can perform related to buckets for the HPE Ezmeral Data Fabric . Administering Tables Describes the operations you can perform related to tables for HPE Ezmeral Data Fabric . Administering Topics Administer topics for Apache Kafka Wire Protocol with HPE Ezmeral Data Fabric . Administering Volumes Administer volumes on HPE Ezmeral Data Fabric . Auditing Fabric and Fabric Data Auditing in Data Fabric Administering Security Policies Add, edit, delete, and manage state of security policies. About Security Policy Domain Describes a security policy domain. 
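The page above names policy create as the underlying maprcli command but shows no invocation. For readers following along at a terminal, a minimal sketch follows; the policy name and flag values are illustrative assumptions, not taken from the page:

```
# Sketch only: create a policy on the global policy master.
# "finance-policy" and the flag values are illustrative assumptions.
maprcli security policy create -name finance-policy -allowtagging true -accesscontrol Armed
```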
+ { + "content": "\nViewing a Security Policy View security policy details. About this task You can view security policy details on the Data Fabric UI. Procedure Log on to the Data Fabric UI. Select Fabric manager from the dropdown on the Home page. Click Security Administration on the Home page. Scroll down to the Global policies card. On the list of policies, click the ellipsis under Actions for the security policy whose details you want to view. Click View details to view the security policy details. Results The security policy is displayed on the Data Fabric UI. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the maprcli command policy info; for more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_security_policy.html", + "title": "Viewing a Security Policy" + },
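Similarly, a hedged sketch of the policy info command cited above; the policy name is an assumption:

```
# Sketch only: print the details of a single policy (the name is an illustrative assumption).
maprcli security policy info -name finance-policy
```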
+ { + "content": "\nViewing All Security Policies View all security policies on the Data Fabric UI. About this task NOTE: The View All button is visible when you have configured five or more security policies on the global policy master. Use the following steps to view all security policies on the Data Fabric UI. Procedure Log on to the Data Fabric UI. Select Fabric manager from the dropdown on the Home page. Click Security Administration on the Home page. Click View All on the Global policies card. Results All existing security policies are displayed on the Data Fabric UI. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the maprcli command policy list; for more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_all_security_policies.html", + "title": "Viewing All Security Policies" + },
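A sketch of the policy list command cited above; as far as we can tell it takes no required arguments:

```
# Sketch only: list all security policies known to the global policy master.
maprcli security policy list
```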
+ { + "content": "\nEditing a Security Policy Make changes to a security policy. Prerequisites You must be a fabric manager to perform this operation. About this task You can make changes to a security policy for purposes such as deprecating or retiring the security policy, or changing access permissions. Procedure Log on to the Data Fabric UI. Select Fabric manager from the dropdown on the Home page. Click Security Administration on the Home page. Scroll down to the Global policies card. On the list of security policies, click the ellipsis under Actions for the security policy to edit. Click Edit policy to make changes to the policy. Make the required changes. Click Save. Results The changes are saved and applied. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the maprcli command policy modify; for more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/editing_security_policy.html", + "title": "Editing a Security Policy" + },
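A hedged sketch of the policy modify command the editing page cites; the flag shown is an assumption:

```
# Sketch only: edit an existing policy, e.g. to disallow tagging (flag value is an assumption).
maprcli security policy modify -name finance-policy -allowtagging false
```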
+ { + "content": "\nAssigning a Security Policy to One or More Volumes About this task You can assign a security policy to one or more volumes associated with a fabric, via the Data Fabric UI. Follow the steps given below to assign a security policy to one or more volumes. Procedure Log on to the Data Fabric UI. Select Fabric manager from the dropdown on the Home page. Click Security Administration on the Home page. Scroll down to the Global policies card. On the list of security policies, click the ellipsis under Actions for the security policy to assign to one or more volumes. Click Assign Policy. Select the fabric and one or more volumes to assign the policy to. Click Save. Results The security policy is assigned to the selected volumes. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the maprcli command policy attach; for more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/assign_policy_to_one_or_more_volumes.html", + "title": "Assigning a Security Policy to One or More Volumes" + },
+ { + "content": "\nAssigning Multiple Security Policies to One or More Volumes Describes how to assign multiple security policies to volumes. Prerequisites You must be a fabric manager to perform this operation. About this task You can assign multiple security policies to one or more volumes associated with a fabric in a single operation, via the Data Fabric UI. Procedure Log on to the Data Fabric UI. Select Fabric manager from the dropdown on the Home page. Click Security Administration on the Home page. Click View All on the Global policies card. Click Assign Policy. Search for policies in the search bar and select the policies to apply to a volume or a common set of volumes. Click Select Resources. Select the fabric and one or more volumes to assign the selected policies to. Click Save. Results The selected security policies are assigned to the selected volumes. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the maprcli command policy attach; for more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/assign_multiple_policies_to_volumes.html", + "title": "Assigning Multiple Security Policies to One or More Volumes" + },
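Both assignment pages above cite policy attach as the underlying command without showing its syntax. One plausible volume-level route is sketched here purely as an assumption; the -securitypolicy parameter usage and all names are not confirmed by these pages:

```
# Sketch only: tag a volume with one or more policies.
# The -securitypolicy parameter usage and all names are unverified assumptions.
maprcli volume modify -name sales-vol -securitypolicy finance-policy,audit-policy
```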
+ { + "content": "\nUnassigning One or More Security Policies from a Volume Unassign a policy from a volume to which it has been previously assigned. Prerequisites You must be a fabric user to perform this operation. About this task You can unassign a security policy from a volume to which it has been assigned. Follow the steps given below to unassign one or more security policies from a volume. Procedure Log on to the Data Fabric UI. Select Fabric user from the dropdown on the Home page. Click the Table view icon on the Resources card. In the tabular list of fabrics, click the down arrow for the fabric that contains the volume. Click the volume name under Resource Name. Go to the Settings tab for the volume. Click the edit icon for Security Policy. On the Edit Policy dialog box, deselect the check boxes for the policy or policies that you wish to unassign; you can use the search bar to search for the required policy if there are multiple policies attached to the volume. Click Save. Results The policy is unassigned from the volume.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/unassigning_a_policy_from_a_volume.html", + "title": "Unassigning One or More Security Policies from a Volume" + },
+ { + "content": "\nDisabling a Security Policy Describes how to disable a security policy. Prerequisites You must be a fabric manager to perform this operation. About this task You can disable a security policy instead of deleting it completely from Data Fabric. When you disable a security policy, it no longer applies to any volume it has been assigned to, and a disabled security policy cannot be assigned to any other volume unless the policy is enabled again. Procedure Log on to the Data Fabric UI. Select Fabric manager from the dropdown on the Home page. Click Security Administration on the Home page. Scroll down to the Global policies card. On the list of security policies, click the ellipsis under Actions for the security policy to disable. Click Edit policy to make changes to the policy. Select Disarmed from the Access Control dropdown. Click Save. Results The security policy is disabled. You can later enable a security policy that has been disabled. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the maprcli command policy modify; for more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/disabling_a_security_policy.html", + "title": "Disabling a Security Policy" + },
+ { + "content": "\nEnabling a Security Policy Describes how to enable a security policy. Prerequisites You must be a fabric manager to perform this operation. About this task You can enable a security policy that has recently been created or has been disabled in the past. Once you enable a security policy, it can be assigned to a fabric resource such as a volume. Procedure Log on to the Data Fabric UI. Select Fabric manager from the dropdown on the Home page. Click Security Administration on the Home page. Scroll down to the Global policies card. Click the ellipsis under Actions for the security policy to enable. Click Edit policy to make changes to the policy. Select Armed from the Access Control dropdown. Click Save. Results The security policy is enabled. An enabled security policy can be assigned to one or more volumes. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the maprcli command policy modify; for more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/enabling_a_security_policy.html", + "title": "Enabling a Security Policy" + },
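The disable and enable pages above both map to policy modify, differing only in the Access Control value. A minimal sketch, with the policy name and flags as assumptions:

```
# Sketch only: disarm (disable) a policy, then re-arm (enable) it.
maprcli security policy modify -name finance-policy -accesscontrol Disarmed
maprcli security policy modify -name finance-policy -accesscontrol Armed
```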
+ { + "content": "\nWorking with an External NFS Server Associate an external NFS server with Data Fabric to share data across clusters in the global namespace. An external NFS server can be used to share data across clusters in the global namespace. You can import an external NFS server into Data Fabric and transfer data onto it, making that data shareable across the clusters in the global namespace. Importing an External Network File System Server Import an external NFS server into Data Fabric to be able to transfer data from Data Fabric to the external NFS server to make it shareable across the clusters in the global namespace or cluster group. Viewing the IP Address/Hostname for External NFS Server View the IP address or hostname associated with an external NFS server on the Data Fabric UI. Deleting an External NFS Server Delete an external NFS server association with Data Fabric.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/working_with_external_nfs_servers.html", + "title": "Working with an External NFS Server" + },
+ { + "content": "\nImporting an External Network File System Server Import an external NFS server into Data Fabric to be able to transfer data from Data Fabric to the external NFS server to make it shareable across the clusters in the global namespace or cluster group. About this task You can import an external network file system (NFS) server into the global namespace so that it is available and accessible to all the clusters in the global namespace. After you import an external NFS server, you can transfer data from the Data Fabric cluster onto the external NFS server; the transferred data is shareable between all clusters present in the global namespace. NOTE: NFSv4-compliant servers can be imported into Data Fabric. When importing an external NFS server, you can specify one or more hostnames or IP addresses that are assigned to the NFS server; if multiple network interface controllers are attached to the NFS server, the server is identified by multiple IP addresses or hostnames. Procedure Log on to the Data Fabric UI. Select Fabric manager on the Home page. Click Global namespace. Click Import External NFS. Enter the name for the NFS server in NFS name. Enter the IP addresses or hostnames for the external NFS server as a comma-separated string in IP address or hostname. Click Import. Results The NFS server is imported into Data Fabric and is visible under the list of resources in the global namespace. You can transfer data to the NFS server after importing it. Related maprcli Commands To implement the features described on this page, the Data Fabric UI relies on the maprcli command clustergroup addexternal; for more information, see maprcli Commands in This Guide.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/importing_an_external_network_file_system.html", + "title": "Importing an External Network File System Server" + },
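The import page cites clustergroup addexternal as the underlying command but does not show its flags, so everything after the command name below is an illustrative guess; consult the maprcli reference before use:

```
# Sketch only: import an external NFSv4 server into the global namespace.
# Flag names and values are unverified assumptions.
maprcli clustergroup addexternal -name backup-nfs -hosts "10.0.0.21,nfs01.example.com"
```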
+ { + "content": "\nViewing the IP Address/Hostname for External NFS Server View the IP address or hostname associated with an external NFS server on the Data Fabric UI. About this task You can view the IP addresses or hostnames that are associated with an external NFS server. Follow the steps given below to view the list of IP addresses or hostnames associated with an external NFS server. Procedure Log on to the Data Fabric UI. Select Fabric manager on the Home page. Click Global namespace. On the Table view, click the ellipsis under Actions for the external NFS server. Click the View IP Addresses/hostnames option under Action. Results The IP addresses or hostnames associated with the external NFS server are displayed.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/viewing_ip_address_hostname_for_external_nfs_server.html", + "title": "Viewing the IP Address/Hostname for External NFS Server" + },
+ { + "content": "\nDeleting an External NFS Server: Delete an external NFS server association with Data Fabric. About this task: You can delete the association of an external NFS server with Data Fabric via the Data Fabric UI. Procedure: 1. Log on to the Data Fabric UI. 2. Select Fabric manager on the Home page. 3. Click Global namespace. 4. On the Table view, click the icon under Action for the external NFS server. 5. Click the Delete option under Action. 6. Click Delete on the confirmation message that appears to confirm deletion. Results: The external NFS server entry is deleted from the Data Fabric UI, and the association between the external NFS server and Data Fabric is removed. You can no longer access or transfer data on the external NFS server via Data Fabric. NOTE: If you wish to access the data on the deleted NFS server, you must import the NFS server into Data Fabric again. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference (for more information, see maprcli Commands in This Guide): clustergroup remove cluster.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/deleting_an_external_nfs_server.html", + "title": "Deleting an External NFS Server" + },
+ { + "content": "\nWorking with External S3 Object Store: You can import a third-party S3 object store into the global namespace to view data from the third-party S3 object store via the Data Fabric UI. Data Fabric supports the import of Amazon S3, VAST, Scality, WEKA, and other S3-compliant object stores. See S3 Federation in Global Namespace for more information. To copy data from fabrics in the global namespace to the external S3 object store, download the data to a local folder and then upload it to the external S3 object store.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/work_with_external_s3_server.html", + "title": "Working with External S3 Object Store" + },
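As a rough illustration of the download-then-upload flow described above, the sketch below assumes a fabric volume reachable at a local path (for example via an NFS mount under /mapr), an already-imported external store, and the AWS CLI configured for it; the paths, bucket name, and endpoint are all hypothetical.

```bash
# Hypothetical copy flow: fabric data -> local staging -> external S3 store.
# The mount path, staging dir, endpoint, and bucket are placeholders.
mkdir -p /tmp/staging
cp /mapr/my.fabric/data/report.csv /tmp/staging/report.csv
aws s3 cp /tmp/staging/report.csv s3://shared-bucket/report.csv \
    --endpoint-url https://external-s3.example.com:9000
```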
+ { + "content": "\nImporting an External S3 Object Store: Describes how to import an external S3 object store into the global namespace. Prerequisites: If Data Fabric accesses the internet via a proxy server, do the following. Specify the proxy settings in /opt/mapr/initscripts/mapr-s3server (the proxy host below is a placeholder): export HTTPS_PROXY=\"http://<proxy-host>:8080\"\nexport http_proxy=\"http://<proxy-host>:8080\"\nexport HTTP_PROXY=\"http://<proxy-host>:8080\"\nexport https_proxy=\"http://<proxy-host>:8080\" Then run the following command to restart the related Data Fabric component: maprcli node services -name s3server -action restart -nodes <hostname> -f -json Data Fabric is then able to access the internet via the proxy and to connect to an external S3 object store. About this task: You can import an external S3 object store into Data Fabric to transfer data from Data Fabric to the external S3 object store and make it shareable across the fabrics in the global namespace. You can import AWS S3, WEKA, Scality, VAST, and other S3-compliant object stores into a global namespace to consolidate your data across external S3 object stores on Data Fabric. Procedure: 1. Log on to the Data Fabric UI. 2. Select Fabric manager on the Home page. 3. Click Global namespace. 4. Click Import External S3. 5. Enter the name for the S3 object store in S3 name. 6. Select AWS as the S3 vendor to import an Amazon S3 object store; alternatively, select Generic to import an S3 object store such as Scality, WEKA, or VAST. 7. If you selected AWS as the S3 vendor: enter the name of the AWS S3 region, the Access key, and the Secret key for the external S3 object store. 8. If you selected Generic as the S3 vendor: enter the Access key and the Secret key for the external S3 object store, enter the IP addresses or hostnames as a comma-separated string in IP address or hostname, and enter the S3 server port. The default value is 9000.
9. Select the Use TLS Encryption check box if you wish to communicate over a secure connection. By default, TLS encryption is enabled. Drag and drop the S3 server certificate for secure communication if the generic S3 object store is not CA certified. 10. Click Import. Results: The S3 object store is imported into Data Fabric and is visible in the list of resources in the global namespace. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference (for more information, see maprcli Commands in This Guide): clustergroup addexternal.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/importing_an_external_s3_server.html", + "title": "Importing an External S3 Object Store" + },
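After an import like the one above, one quick sanity check is to list buckets against the same endpoint and keys from any machine that can reach the store. This is a hedged sketch rather than a documented step; the endpoint host is a placeholder, and port 9000 is simply the default named in the procedure.

```bash
# Hedged verification sketch, not a documented Data Fabric step:
# list buckets on the external store with the keys used at import.
export AWS_ACCESS_KEY_ID="<access-key>"
export AWS_SECRET_ACCESS_KEY="<secret-key>"
aws s3 ls --endpoint-url https://external-s3.example.com:9000
```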
+ { + "content": "\nAdministering Alarms: Manage alarms via the HPE Ezmeral Data Fabric UI. About this task: Alarms are the alerts or notifications generated by Data Fabric. Alarms can relate to errors, warnings, or information about various fabric resources. Data Fabric generates the following types of alarms: Fabric alarms, User alarms, Node alarms, and Volume alarms. Alarms raised by Data Fabric can be viewed, muted, or dismissed from the Data Fabric UI. See Viewing Alarms for the procedure to view alarms on the Data Fabric UI. See Muting/Dismissing Alarms to mute or dismiss an alarm via the Data Fabric UI.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/managing_alarms.html", + "title": "Administering Alarms" + }, + { + "content": "\nViewing Alarms: View alarms on the Data Fabric UI. Prerequisites: You must have access to the Data Fabric UI and the permission to view alarms. About this task: Data Fabric generates the following types of alarms: Fabric alarms, User alarms, Node alarms, and Volume alarms. Use the following steps to view alarms generated by various events occurring on Data Fabric. Procedure: 1. Log on to the Data Fabric UI. 2. Click the bell icon at the top right corner, next to the help icon. Results: The list of alarms is displayed. You can mute or dismiss an alarm. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference (for more information, see maprcli Commands in This Guide):
alarm list.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/view_alarms.html", + "title": "Viewing Alarms" + },
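For reference, listing raised alarms from a terminal on a fabric node might look like the sketch below. The page names maprcli alarm list; the -json output flag is a common maprcli convention, and the invocation is illustrative rather than a documented procedure.

```bash
# Illustrative only: list currently raised alarms in JSON form.
maprcli alarm list -json
```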
+ { + "content": "\nMuting/Dismissing Alarms: Mute or dismiss an alarm via the HPE Ezmeral Data Fabric UI. Prerequisites: You must have access to the Data Fabric UI and the permission to mute or dismiss an alarm. About this task: You can mute or dismiss an alarm that is visible on the Data Fabric UI. An alarm can be muted for 24 hours, 6 hours, or 1 hour. Procedure: 1. Log on to the Data Fabric UI. 2. Click the bell icon at the top right corner, next to the help icon. 3. Click View all to view all alarms. 4. To mute an alarm, click Mute and select the duration for which you wish to mute the alarm. Alternatively, click Dismiss to dismiss the alarm. Results: The alarm is muted for the specified duration or dismissed, depending on the action you selected. Related maprcli Commands: To implement the features described on this page, the Data Fabric UI relies on the following maprcli command, provided for general reference (for more information, see maprcli Commands in This Guide): alarm mute.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/muting_dismissing_an_alarm.html", + "title": "Muting/Dismissing Alarms" + }, + { + "content": "\nMonitoring: Describes monitoring with OpenTelemetry for HPE Ezmeral Data Fabric. OpenTelemetry (OTel) is an observability framework that allows you to instrument, generate, collect, and export telemetry data. For more information on OTel, see the official OpenTelemetry documentation. The OTel endpoint provides centralized monitoring for your HPE Ezmeral Data Fabric deployments. Use OTel to generate metrics and logs for your fabrics, manage your OTel deployments through the Data Fabric UI, and view the generated telemetry data through EZ Central.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/monitoring.html", + "title": "Monitoring" + },
+ { + "content": "\nAdding an OTel Endpoint: Describes how to add an OTel endpoint to a fabric. Prerequisites: You must be a fabric manager to perform this operation. About this task: Proceed as follows to add an OTel endpoint to a fabric. Procedure: 1. Log on to the Data Fabric UI. 2. Click the Fabric administration tab. 3. On the OTEL endpoints card, click Add endpoint. The Add OTEL endpoint side drawer opens. 4. Enter the Name. 5. Enter the URL of your OTel endpoint. 6. If your OTel endpoint contains a port, enter the port number. 7. To enable your OTel endpoint to return logs and/or metrics data, select Logs and/or Metrics. 8. Click Select file to select a key file to upload. Alternatively, drag and drop the key file to the Upload files area. 9. Click Select file to select a client certificate file to upload. Alternatively, drag and drop the client certificate file to the Upload files area. 10. Click Add. Results: The OTel endpoint is created on the fabric. If you selected Logs and/or Metrics, the OTel endpoint now returns the selected telemetry data for the fabric. You can view the telemetry data generated for the fabric through EZ Central.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/adding_an_otel_endpoint.html", + "title": "Adding an OTel Endpoint" + },
+ { + "content": "\nGetting Started with Iceberg: Summarizes what you need to know to begin using Iceberg with HPE Ezmeral Data Fabric release 7.6.0. Version Support: HPE Ezmeral Data Fabric 7.6.0 has been tested with Iceberg 1.4.2, mapr-spark-3.3.3.0, and iceberg-spark-runtime-3.3_2.12-1.4.2.jar. Other data-processing engines, such as open-source Spark, PrestoDB, and Flink, and data-processing technologies, such as Snowflake, have not been tested. Catalog Support: Catalogs manage the metadata for datasets and tables in Iceberg. You must specify the catalog when interacting with Iceberg tables through Spark. The following built-in catalogs have been tested for use with Data Fabric 7.6.0: HiveCatalog and HadoopCatalog. Spark Setup for Iceberg: Setting up Spark to use Iceberg is a two-step process: 1. Add the org.apache.iceberg:iceberg-spark-runtime-<spark-version>_<scala-version>:<iceberg-version> jar file to your application classpath. Either add the runtime jar to the jars folder in your Spark directory, or add it directly to the application classpath by using the --packages or --jars option. 2. Configure a catalog. For information about using catalogs with Iceberg, see Catalogs. For examples, see the Spark and Iceberg Quickstart.
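Combining the two classpath options above for the tested versions, launching Spark with the Iceberg runtime could look like the following sketch; the --packages coordinate mirrors the tested iceberg-spark-runtime-3.3_2.12-1.4.2.jar, and the local path in option B is a placeholder.

```bash
# Option A: fetch the Iceberg runtime from Maven at launch time.
spark-shell --packages org.apache.iceberg:iceberg-spark-runtime-3.3_2.12:1.4.2

# Option B: point at a local copy of the jar instead (placeholder path).
spark-shell --jars /path/to/iceberg-spark-runtime-3.3_2.12-1.4.2.jar
```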
Configuring Your Spark Application: Consider adding the following parameters to your Spark application (replace <catalog_name> and <warehouse_path> with your catalog name and warehouse location):\nspark.sql.catalog.<catalog_name>.type=hive\nspark.sql.catalog.<catalog_name>.warehouse=<warehouse_path>\nspark.sql.catalog.<catalog_name>=org.apache.iceberg.spark.SparkSessionCatalog\nspark.sql.legacy.pathOptionBehavior.enabled=true", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/administration/iceberg_getting_started.html", + "title": "Getting Started with Iceberg" + },
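As a worked example of the configuration above, the sketch below passes the same properties to a spark-sql session. Because org.apache.iceberg.spark.SparkSessionCatalog extends Spark's built-in session catalog, it is conventionally registered under the name spark_catalog; the database and table names here are hypothetical, and a reachable Hive metastore is assumed.

```bash
# Hedged sketch: SparkSessionCatalog registered as spark_catalog;
# assumes a reachable Hive metastore, and db.events is hypothetical.
spark-sql \
  --packages org.apache.iceberg:iceberg-spark-runtime-3.3_2.12:1.4.2 \
  --conf spark.sql.catalog.spark_catalog=org.apache.iceberg.spark.SparkSessionCatalog \
  --conf spark.sql.catalog.spark_catalog.type=hive \
  --conf spark.sql.legacy.pathOptionBehavior.enabled=true \
  -e "CREATE TABLE IF NOT EXISTS db.events (id BIGINT, ts TIMESTAMP) USING iceberg"
```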
+ { + "content": "\nReference: Provides reference information for the HPE Ezmeral Data Fabric. This section covers the following topics: Release History describes the currently released versions of the HPE Ezmeral Data Fabric as-a-service platform. Cloud Instance Specifications compares different aspects of the supported cloud instances. Third-Party Storage Solutions describes global-namespace support for HPE partner storage technologies, including Scality, WEKA, and VAST. Port Information describes the ports used by HPE Ezmeral Data Fabric services. maprcli Commands in This Guide describes how to use maprcli commands provided as reference links in this guide. Operating System Support Matrix shows the Linux operating-system versions that are supported for HPE Ezmeral Data Fabric releases. Doc Site Available as a PDF provides a link to the downloadable PDF file containing all the information for the current release. Product Licensing provides information related to product licensing. Other Resources provides links to additional resources such as on-demand training, videos, blogs, and the HPE Ezmeral Data Fabric community. Contact HPE provides a link to contact HPE Sales or Support.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/reference_main.html", + "title": "Reference" + }, + { + "content": "\nRelease History: Describes the currently released versions of the HPE Ezmeral Data Fabric as-a-service platform. Only core versions 7.4.0 and later are currently supported for the as-a-service platform of the HPE Ezmeral Data Fabric.\nDate | Build Version | Core Version | EEP Version | Docker Image\nJanuary 31, 2024 | 7.6.0.0.20240123101503.GA | 7.6.0.0 | 9.2.1 | maprtech/edf-seed-container:7.6.0_9.2.1_edf\nOctober 30, 2023 | 7.5.0.0.20231026222149.GA | 7.5.0.0 | 9.2.0 | maprtech/edf-seed-container:7.5.0_9.2.0_edf\nAugust 8, 2023 | 7.4.0.0.20230728133744.GA | 7.4.0.0 | 9.1.2 | maprtech/edf-seed-container:7.4.0_9.1.2_dfaas\nMay 12, 2023 | 7.3.0.0.20230425002320.GA | 7.3.0.0 | 9.1.1 | maprtech/dev-sandbox-container:7.3.0_9.1.1_dfaas\nMore information: Viewing the Software Version.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/release_history.html", + "title": "Release History" + },
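The Docker images named in the table above can be pulled directly to try the as-a-service platform locally; the command below uses the release 7.6.0 image tag from the table and is an illustration, not a documented installation procedure.

```bash
# Pull the release 7.6.0 seed container named in the table above.
docker pull maprtech/edf-seed-container:7.6.0_9.2.1_edf
```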
+ { + "content": "\nCloud Instance Specifications: Compares different aspects of the supported cloud instances of the HPE Ezmeral Data Fabric.\nAWS Cloud Instance Specifications. The following table describes the AWS cloud instance for different storage tiers:\nSpecification | 100GB | 1TB | 10TB | 100TB | 1PB\nNumber of Instances | 1 | 3 | 5 | 10 | 15\nInstance Type | m6i.4xlarge | m6i.4xlarge | m6i.4xlarge | m6i.4xlarge | m6i.4xlarge\nNumber of DF Disks | 1 | 3 | 4 | 10 | 14\nDF Disk Type | st1 | gp3 | gp3 | gp3 | gp3\nDF Disk Size | 128 | 128 | 512 | 1024 | 5120\nSwap Disk Type | gp3 | gp3 | gp3 | gp3 | gp3\nRAM | 64GB | 64GB | 64GB | 64GB | 64GB\nCPU | 16 | 16 | 16 | 16 | 16\nSwap Disk Size | 16 | 64 | 128 | 256 | 512\nAzure Cloud Instance Specifications. The following table describes the Azure cloud instance for different storage tiers:\nSpecification | 100GB | 1TB | 10TB | 100TB | 1PB\nNumber of Instances | 1 | 3 | 5 | 10 | 15\nInstance Type | Standard_B12ms | Standard_B16ms | Standard_B16ms | Standard_B20ms | Standard_B20ms\nNumber of DF Disks | 1 | 3 | 4 | 10 | 14\nDF Disk Type | Standard_LRS | Standard_LRS | Standard_LRS | Standard_LRS | Standard_LRS\nDF Disk Size | 100 | 115 | 512 | 1024 | 4800\nRAM | 48GB | 64GB | 64GB | 80GB | 80GB\nCPU | 12 | 16 | 16 | 20 | 20\nSwap Disk Type | Standard_LRS | Standard_LRS | Standard_LRS | Standard_LRS | Standard_LRS\nSwap Disk Size | 32 | 32 | 32 | 32 | 32\nGCP Cloud Instance Specifications. The following table describes the GCP cloud instance for different storage tiers:\nSpecification | 100GB | 1TB | 10TB | 100TB | 1PB\nNumber of Instances | 1 | 3 | 5 | 10 | 15\nRAM | 64GB | 64GB | 64GB | 64GB | 64GB\nCPU | 16 | 16 | 16 | 16 | 16\nInstance Type | n2-standard-16 | n2-standard-16 | n2-standard-16 | n2-standard-16 | n2-standard-16\nRoot Disk Type | pd-ssd | pd-ssd | pd-ssd | pd-ssd | pd-ssd\nRoot Disk Size | 200 | 200 | 200 | 200 | 200\nNumber of DF Disks | 2 | 3 | 4 | 10 | 14\nDF Disk Type | pd-standard | pd-standard | pd-standard | pd-standard | pd-standard\nDF Disk Size | 100 | 115 | 512 | 1024 | 4880\nSwap Disk Type | pd-standard | pd-standard | pd-standard | pd-standard | pd-standard\nSwap Disk Size | 32 | 32 | 32 | 32 | 32", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/cloud_deployment_specifications.html", + "title": "Cloud Instance Specifications" + },
+ { + "content": "\nThird-Party Storage Solutions: Describes global-namespace support for HPE partner storage technologies, including Scality, WEKA, and VAST. The HPE Ezmeral Data Fabric 7.6.0 global namespace (GNS) is compatible with the following third-party object-storage solutions. External NFS integration with the GNS uses system security (AD/LDAP) or Kerberos; external S3 integration with the GNS uses a secret key and access key over HTTPS or HTTP:\nStorage Product | NFS with AD/LDAP | NFS with Kerberos | S3 over HTTPS | S3 over HTTP\nWEKA | Supported | Not Supported | Supported | Supported\nVAST Data on HPE Alletra | Supported | Supported | Supported | Supported\nScality ARTESCA | N/A* | N/A* | Supported | Supported\nScality RING | Supported | Supported | Supported | Supported\nMinio Server | N/A* | N/A* | Supported | Supported\nNFS Ganesha | Supported | Supported | N/A* | N/A*\n*N/A means not supported by the storage vendor.\nMore information: Scality Documentation, WEKA Documentation, VAST Data Documentation.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/storage_solutions.html", + "title": "Third-Party Storage Solutions" + },
+ { + "content": "\nPort Information: Describes the ports used by HPE Ezmeral Data Fabric services. The following table lists the principal services, the ports they use, and the associated protocol. For traffic within a subnet, the port is not relevant; hosts can communicate on any available port.\nService | Port | Protocol\nCLDB | 7222 | TCP\nData Fabric Gateway | 7660 | TCP\nData Fabric Keycloak | 6443 | TCP\nData Fabric UI | 8443 | TCP\nFileserver | 5660 | TCP\nFileserver | 5692 | TCP\nInstaller | 9443 | TCP\nMOSS (Multithreaded Object Store Server) | 9000 | TCP\nNFS (into VPC UDP) | 111 | UDP\nNFS (into VPC TCP) | 2049 | TCP\nOpenTSDB | 4242 | TCP\nSSH | 22 | TCP", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/port_information.html", + "title": "Port Information" + },
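When checking cross-subnet reachability against the table above, a quick probe with netcat can help. The sketch below tests a few of the listed TCP ports against a hypothetical hostname and is illustrative only, not part of the documentation.

```bash
# Illustrative reachability probe for a few services from the port table.
# fabric-node1.example.com is a hypothetical hostname.
for port in 7222 8443 5660 9000; do
    if nc -z -w 3 fabric-node1.example.com "$port"; then
        echo "port $port open"
    else
        echo "port $port unreachable"
    fi
done
```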
+ { + "content": "\nmaprcli Commands in This Guide: Describes how to use maprcli commands provided as reference links in this guide. Links to maprcli commands are provided at the bottom of some procedures in this guide. Clicking a link to a maprcli command opens a page in the customer-managed documentation website, where more detailed information for all maprcli commands is located. These commands are listed for reference purposes only and are not intended to replace the documented Data Fabric UI procedures. In addition, some as-a-service features are not supported on the customer-managed platform. HPE Ezmeral Data Fabric users are encouraged to use the Data Fabric UI for all operations. Users of the as-a-service HPE Ezmeral Data Fabric are generally not encouraged to use maprcli commands when a UI control is available; however, interested users can read more about the commands in the customer-managed documentation.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/maprcli_command_information.html", + "title": "maprcli Commands in This Guide" + },
+ { + "content": "\nOperating System Support Matrix: The tables on this page show the Linux operating-system versions that are supported for HPE Ezmeral Data Fabric releases.\nRed Hat Enterprise Linux (64-bit):\nRHEL Version | Release 7.6.0 | Release 7.5.0\n8.8 | Yes | Yes\n8.6 | Yes | Yes\n8.5 | Yes | Yes\n8.4 | Yes | Yes\n8.3 | Yes | Yes\n8.2 | Yes | Yes\n8.1 | Yes | Yes\nRocky Linux (64-bit):\nRocky Version | Release 7.6.0 | Release 7.5.0\n8.5 | Yes | Yes\n8.4 | Yes | Yes\nUbuntu (64-bit):\nUbuntu Version | Release 7.6.0 | Release 7.5.0\n20.04 | Yes | Yes\n18.04 | Yes | Yes\nSLES (64-bit):\nSLES Version | Release 7.6.0 | Release 7.5.0\n15 SP3 | Yes | Yes\n15 SP2 | Yes | Yes\nOracle Enterprise Linux (OEL):\nOEL Version | Release 7.6.0 | Release 7.5.0\n8.4 | Yes | Yes\n8.3 | Yes | Yes\n8.2 | Yes | Yes", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/os_matrix.html", + "title": "Operating System Support Matrix" + },
+ { + "content": "\nDoc Site Available as a PDF: Provides a link to the downloadable PDF file containing all the information for the current release. For a given release, you can access HPE Ezmeral Data Fabric documentation as a single, downloadable PDF file. A PDF file for each release is compiled several weeks after the release becomes public and is available for download from the HPE Support Center. Here is the PDF location for the current release: HPE Ezmeral Data Fabric 7.6.0 Documentation. To download the PDF from the HPE Support Center: 1. Navigate to the Support Center home page for a Data Fabric release: HPE Ezmeral Data Fabric 7.6.0 Documentation, HPE Ezmeral Data Fabric 7.5.0 Documentation, or HPE Ezmeral Data Fabric 7.4.0 Documentation. 2. Above the right-navigation pane, click the PDF button and select Export all content. A PDF file is downloaded to your workstation. IMPORTANT: PDF files are updated infrequently. They are a snapshot of the available information at the time the PDF was created. For the most current technical information, HPE recommends that you refer to the HTML pages, which are updated continuously, provide a Feedback button that enables you to submit comments or corrections, and can make it easier to access multimedia resources such as product videos.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/doc_site_pdf.html", + "title": "Doc Site Available as a PDF" + }, + { + "content": "\nProduct Licensing: Provides information related to product licensing. This section covers: Additional License Authorizations (ALA), which provides Additional License Authorizations for HPE Ezmeral Software, including HPE Ezmeral Runtime Enterprise, HPE Ezmeral ML Ops, HPE Ezmeral Data Fabric, and Open Source Software; and Open-Source Software Acknowledgements (Release 7.6.0), which provides licensing information and acknowledges the use of open-source projects with HPE software.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/overview/product_licensing.html", + "title": "Product Licensing" + },
Provides Additional License Authorizations for HPE Ezmeral Software, including HPE Ezmeral Runtime Enterprise, HPE Ezmeral ML Ops, HPE Ezmeral Data Fabric, and Open Source Software.\n\nSee: Additional License Authorizations for HPE Ezmeral Software.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/overview/additional_license_authorizations.html", + "title": "Additional License Authorizations (ALA)" + }, + { + "content": "\nOpen-Source Software Acknowledgements (Release 7.6.0)\n\n
Provides licensing information and acknowledges the use of open-source projects with HPE software.\n\nAbout the NOTICE.txt File\n\nThe NOTICE.txt file provides licensing information and software acknowledgements for open-source software used by the HPE Ezmeral Data Fabric. On a release 7.6.0 Data Fabric node, you can find the file in the /opt/mapr directory. The release 7.6.0 file contains the following information:\n\nOpen Source Notice\n\nThe Hewlett Packard Enterprise (\"HPE\") software accompanied by this notice is provided along with certain \nthird party software licensed under various open source software licenses (\"Open Source Components\"). The \nbelow list of Open Source Components includes, as applicable, copyright notices, original source code URLs and \nlicense URLs, and indicates whether HPE has modified the original source code of the Open Source Components. \nWith respect to licenses that require a particular language to be provided (such as the complete terms of the \nlicense itself), that language is included below under the first Open Source Component that is subject to such \nlicense.\n\n\nWith respect to Open Source Components licensed under the AGPL, CPL, GPL or LGPL, HPE hereby offers to provide \nupon request the source code thereof, including the HPE modifications, if any. Such modifications are \ndocumented by way of comments included in the source code files.\n\n\nIn addition to the warranty disclaimers contained in the open source licenses linked below and thus included \nherein by reference, HPE makes the following disclaimers regarding the Open Source Components on behalf of \nitself, the copyright holders, contributors, and licensors of such Open Source Components:\n\n\nTO THE FULLEST EXTENT PERMITTED UNDER APPLICABLE LAW, THE OPEN SOURCE COMPONENTS ARE PROVIDED BY THE COPYRIGHT \nHOLDERS, CONTRIBUTORS, LICENSORS, AND HPE \"AS IS\" AND ANY REPRESENTATIONS OR WARRANTIES OF ANY KIND, WHETHER \nORAL OR WRITTEN, WHETHER EXPRESS, IMPLIED, OR ARISING BY STATUTE, CUSTOM, COURSE OF DEALING, OR TRADE USAGE, \nINCLUDING WITHOUT LIMITATION THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR \nPURPOSE, AND NON-INFRINGEMENT, ARE DISCLAIMED.
IN NO EVENT WILL THE COPYRIGHT OWNER, CONTRIBUTORS, LICENSORS, \nOR HPE BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES \n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR \nBUSINESS INTERRUPTION), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THE OPEN SOURCE COMPONENTS, \nEVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Project-Specific Copyright, Source Code, and License Information -----------------------------------------------------------\n\nHadoop\n\nCopyright (c) 2011 The Apache Software Foundation.\n\nSource code: http://hadoop.apache.org/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Hive\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Zeppelin\n\nCopyright (c) 2015 - 2016 The Apache Software Foundation\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Tez\n\nCopyright (c) 2016 The Apache Software Foundation\n\nSource code: git://git.apache.org/tez.git\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache HBase\n\nSource code: http://hbase.apache.org/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nAsync HBase\nCopyright (C) 2010-2012 The Async HBase Authors. 
All rights reserved.\n\nNew BSD License\n\nhttp://opensource.org/licenses/BSD-3-Clause\n\n-----------------------------------------------------------\n\nApache Thrift\n\nCopyright (c) 2006-2010 The Apache Software Foundation.\n\nSource code: http://incubator.apache.org/thrift/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache RocksDB\n\nCopyright (c) 2004 The Apache Software Foundation.\n\nSource code: http://incubator.apache.org/thrift/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Kafka\n\nSource code: https://github.com/apache/kafka\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nElasticsearch\n\nCopyright 2009-2016 Elasticsearch\n\nSource code: https://github.com/elastic/elasticsearch\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\nGrafana\n\nCopyright 2012-2013 Elasticsearch BV\n\nSource code: https://github.com/grafana/grafana\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\nKibana\n\nCopyright 2012-2016 Elasticsearch BV\n\nSource code: https://github.com/elastic/kibana\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\ncollectd\n\nCopyright (C) 1989, 1991 Free Software Foundation\n\nSource code: https://github.com/collectd/collectd\n\nLicense: LGPL 2\nhttps://github.com/collectd/collectd/blob/master/COPYING\n\n-----------------------------------------------------------\n\nfluentd\n\nCopyright (C) 2011 FURUHASHI Sadayuki\n\nSource code: https://github.com/fluent/fluentd\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\nMySQL Connector/J\n\nCopyright (C) 1989, 1991 Free Software Foundation\n\nSource code: https://github.com/mysql/mysql-connector-j\n\nLicense: LGPL 2\nhttps://github.com/mysql/mysql-connector-j/blob/release/5.1/COPYING\n\n-----------------------------------------------------------\n\nGanesha\n\nCopyright (C) 2007 Free Software Foundation, Inc.\n\nSource code: https://github.com/nfs-ganesha/nfs-ganesha\n\nLicense: LGPL 3\nhttps://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/LICENSE.txt\n\n-----------------------------------------------------------\n\nMinio\n\nCopyright (c) 2004, The Apache Software Foundation\n\nMinIO Client (C) 2014-2020 MinIO, Inc.\n\nThis product includes software developed at MinIO, Inc.\n(https://min.io/).\n\nThe MinIO project contains unmodified/modified subcomponents too with\nseparate copyright notices and license terms. 
Your use of the source\ncode for the these subcomponents is subject to the terms and conditions\nof the following licenses.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\ngRPC\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/grpc/grpc\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\n\n\nKafka-connect-jdbc\n Copyright (c) 2015 Confluent Inc.\nThe following libraries are included in packaged versions of this project:\n\n* SQLite JDBC Driver\n * COPYRIGHT: Copyright Taro L. Saito, David Crenshaw\n * LICENSE: licenses/LICENSE.apache2.txt\n * NOTICE: licenses/NOTICE.sqlite-jdbc.txt\n * HOMEPAGE: https://github.com/xerial/sqlite-jdbc\n\n* PostgreSQL JDBC Driver\n * COPYRIGHT: Copyright 1997-2011, PostgreSQL Global Development Group\n * LICENSE: licenses/LICENSE.bsd.txt\n * HOMEPAGE: https://jdbc.postgresql.org/\n\n* MariaDB JDBC Driver\n * COPYRIGHT: Copyright 2012 Monty Program Ab., 2009-2011, Marcus Eriksson\n * LICENSE: licenses/LICENSE.lgpl.txt\n * HOMEPAGE: https://mariadb.com/kb/en/mariadb/about-mariadb-connector-j/\n-----------------------------------------------------------\n\nkafka-connect-hdfs\nCopyright (c) 2015 Confluent Inc.\n-----------------------------------------------------------\n\n\nkafka-rest\nConfluent Community License Agreement Version 1.0\n\n\n-----------------------------------------------------------\n\nschema-registry\n\nThe project is licensed under the Confluent Community License, except for client\nlibs, which is under the Apache 2.0 license.\n\nSee LICENSE file in each subfolder for detailed license agreement.\n\n-----------------------------------------------------------\n\nKSQL\n\nConfluent Community License Agreement Version 1.0\n\nThe project is licensed under the Confluent Community License.\n\nApache, Apache Kafka, Kafka, and associated open source project names are trademarks of the Apache Software Foundation.\n\n-----------------------------------------------------------\n\nrest-utils\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n\nThe following libraries are included in packaged versions of this project:\n\n* ClassMate\n * COPYRIGHT: Copyright 2010 The Apache Software Foundation\n * LICENSE: licenses/LICENSE.apache2.txt\n * HOMEPAGE: https://github.com/cowtowncoder/java-classmate\n\n* Confluent Common\n * COPYRIGHT: Confluent Inc.\n * LICENSE: licenses/LICENSE.apache2.txt\n * HOMEPAGE: https://github.com/confluentinc/common\n\n* Hamcrest\n * COPYRIGHT: Copyright (c) 2000-2006, www.hamcrest.org\n * LICENSE: licenses/LICENSE.bsd.txt\n * HOMEPAGE: http://hamcrest.org/\n\n* Hibernate\n * COPYRIGHT: licenses/COPYRIGHT.hibernate.txt\n * LICENSE: licenses/LICENSE.apache2.txt\n * HOMEPAGE: http://hibernate.org/validator/\n\n* HK2\n * COPYRIGHT: Copyright (c) 2010-2014 Oracle and/or its affiliates. 
All rights reserved.\n * LICENSE: licenses/LICENSE.cddl+gpl2.html\n * HOMEPAGE: https://hk2.java.net\n\n* Jackson annotations\n * LICENSE: licenses/LICENSE.jackson-annotations.txt (Apache 2)\n * HOMEPAGE: http://github.com/FasterXML/jackson\n\n* Jackson core\n * LICENSE: licenses/LICENSE.jackson-core.txt (Apache 2)\n * NOTICE: licenses/NOTICE.jackson-core.txt\n * HOMEPAGE: http://github.com/FasterXML/jackson\n\n* Jackson databind\n * LICENSE: licenses/LICENSE.jackson-databind.txt (Apache 2)\n * NOTICE: licenses/NOTICE.jackson-databind.txt\n * HOMEPAGE: http://github.com/FasterXML/jackson\n\n* Jackson jaxrs-json-provider\n * LICENSE: licenses/LICENSE.jackson-core.txt (Apache 2)\n * NOTICE: licenses/NOTICE.jackson-core.txt\n * HOMEPAGE: http://github.com/FasterXML/jackson\n\n* Javassist\n * COPYRIGHT: Copyright (C) 1999- by Shigeru Chiba, All rights reserved.\n * LICENSE: licenses/LICENSE.javassist.txt (MPL, LGPL, Apache 2)\n * HOMEPAGE: http://www.javassist.org\n\n* javax.annotation-api, javax.el, javax.el-api, javax.inject, javax.servlet, javax.ws.rs-api, javax.validation\n * COPYRIGHT: Coypright Oracle\n * LICENSE: licenses/LICENSE.cddl+gpl2.html\n\n* JBoss Logging\n * COPYRIGHT: Copyright 2014 Red Hat, Inc.\n * LICENSE: licenses/LICENSE.apache2.txt\n * HOMEPAGE: http://www.jboss.org\n\n* Jersey\n * LICENSE: licenses/LICENSE.cddl+gpl2.html\n * HOMEPAGE: http://jersey.java.net\n\n* Jetty\n * COPYRIGHT: Copyright Mort Bay Consulting Pty Ltd unless otherwise noted\n * LICENSE: licenses/LICENSE.apache2.txt, licenses/LICENSE.epl.html\n * NOTICE: licenses/NOTICE.jetty.txt\n * HOMEPAGE: http://eclipse.org/jetty/\n\n* JUnit\n * LICENSE: licenses/LICENSE.epl.txt\n * NOTICE: licenses/NOTICE.junit.txt\n * HOMEPAGE: http://junit.org/\n\n\n\n-----------------------------------------------------------\n\n\n\nKStreams\n\nCopyright (c) 2004, The Apache Software Foundation\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nHttpComponents\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: http://hc.apache.org\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nQuartz-Scheduler Hazelcast Job Store\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/FlavioF/quartz-scheduler-hazelcast-jobstore\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nQuartz\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/quartz-scheduler/quartz\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nAWS JAVA-SDK\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://aws.amazon.com/sdk-for-java\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nZIP4J\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: http://www.lingala.net/zip4j/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nArgs4j\n\nCopyright (c) 2013, Kohsuke Kawaguchi and other contributors\n\nSource code: 
https://github.com/kohsuke/args4j\n\nLicense: MIT\nhttp://www.opensource.org/licenses/mit-license.php\n\n-----------------------------------------------------------\n\nCurator\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://curator.apache.org/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nHazelcast Discovery Plugin for Apache ZooKeeper\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/hazelcast/hazelcast-zookeeper\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nIntel(R) Intelligent Storage Acceleration Library\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/01org/isa-l\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nIntel(R) Intelligent Storage Acceleration Library Crypto Version\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/01org/isa-l_crypto\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nMapR-DB Client Driver for Python Application\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/mapr/maprdb-python-client\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nMapR-DB Client Driver for Node.JS Application\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/mapr/maprdb-node-client\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nMesoshpere Mesos-DNS\n\nCopyright (c) 2015, The Apache Software Foundation\n\nSource code: https://github.com/mesosphere/mesos-dns\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nJava Library for Processing JSON\n\nCopyright (c) 2015, The Apache Software Foundation\n\nSource Code: Source: https://github.com/FasterXML\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\nhttp://wiki.fasterxml.com/JacksonLicensing\n\n-----------------------------------------------------------\n\nSpring Framework\n\nSource code: https://github.com/spring-projects\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nSpring Shell\n\nSource code: https://github.com/spring-projects\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nTCMalloc\n\nNew BSD License\n\nhttp://opensource.org/licenses/BSD-3-Clause\n\n-----------------------------------------------------------\n\nAntlr4 Runtime\n\nSource Code: https://github.com/antlr/antlr4/\n\nLicense: BSD License\nhttp://www.antlr.org/license.html\n\n-----------------------------------------------------------\n\nAOP Alliance\n\nSource Code: http://sourceforge.net/p/aopalliance/code/\n\nLicense: Public 
Domain\n\n-----------------------------------------------------------\n\nASM Java Bytecode Manipulation and Analysis Framework\n\nSource Code: http://forge.ow2.org/plugins/scmsvn/index.php?group_id=23\n\nLicense: BSD License\nhttp://forge.ow2.org/projects/asm/\n\n-----------------------------------------------------------\n\nJLine (Java Library for Handling Console Input v. 2)\n\nSource Code: https://github.com/jline/jline2\n\nLicense: BSD License\n\nhttps://github.com/jline/jline2/blob/master/LICENSE.txt\n\n-----------------------------------------------------------\n\nOpenTSDB\n\nLGPL v2.1\nhttps://github.com/OpenTSDB/opentsdb/blob/master/COPYING.LESSER\n\n-----------------------------------------------------------\n\nApache Spark\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nSnappy 1.0.5\nNew BSD License\n\nhttp://opensource.org/licenses/BSD-3-Clause\n\n-----------------------------------------------------------\n\nHue\n\nCopyright (c) Cloudera\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nAnsible\n\nGPL\nhttps://github.com/ansible/ansible/blob/devel/COPYING\n\n-----------------------------------------------------------\n\nApache Drill\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\ngperftools 2.0\nNew BSD License\n\nhttp://opensource.org/licenses/BSD-3-Clause\n\n-----------------------------------------------------------\n\nApache ZooKeeper\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nSource code: http://zookeeper.apache.org\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nOpen\n\nApplication Interface (OJAI)\n\nCopyright (c) 2015 The Apache Software Foundation.\n\nSource code: https://github.com/ojai/ojai\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Commons\n\nCopyright (c) 2003-2007 The Apache Software Foundation.\n\nSource code and additional copyright: http://commons.apache.org/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nGoogle Collections (Guava)\n\nCopyright (c) 2007 Google Inc.\n\nSource code: http://code.google.com/p/guava-libraries/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Tomcat\n\nCopyright (c) 1999-2011 The Apache Software Foundation.\n\nSource code: http://tomcat.apache.org\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nJetty Web Container\n\nCopyright (c) 1995-2009 Mort Bay Consulting Pty Ltd.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nOpen Json\n\nAndroid JSON library\nCopyright (C) 2010 The Android Open Source Project\n\nSource code: https://github.com/tdunning/open-json\nLicense: Apache License, Version 
2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nJUnit\n\nLicense: Common Public License - v 1.0\nhttp://www.junit.org/license\n\n-----------------------------------------------------------\n\nlog4j\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nJavaMail\n\nCopyright (c) 1997-2011, Oracle and/or its affiliates.\n\nSource code: http://www.oracle.com/technetwork/java/index-138643.html\n\nLicense: Oracle Corporation (\"ORACLE\") ENTITLEMENT for SOFTWARE\nSee below.\n\n-----------------------------------------------------------\n\nProtocol Buffers\n\nCopyright (c) 2008 Google Inc.\n\nSource code: http://protobuf.googlecode.com\n\nLicense: New BSD License\nhttp://www.opensource.org/licenses/bsd-license.php\n\n-----------------------------------------------------------\n\nuuid - DCE compatible Universally Unique Identifier library\n\nCopyright (C) 1996, 1997, 1998 Theodore Ts'o.\n\nLicense: below.\n\n-----------------------------------------------------------\n\nMurmurHash\n\nSource code: http://code.google.com/p/smhasher/\n\nLicense: MIT License\nhttp://www.opensource.org/licenses/mit-license.php\n\n-----------------------------------------------------------\n\nEval - A Simple Expression Evaluator for Java\n\nSource code: http://java.net/projects/eval/pages/Home\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nGuava Release 11.0.1\n\nSource code: http://code.google.com/p/guava-libraries/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nsuEXEC - Apache HTTP Server Version 2.0\n\nSource code: http://httpd.apache.org/docs/2.0/suexec.html\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nLZ4 compression\n\nCopyright (C) 2011-2012, Yann Collet.\n\nSource code: http://code.google.com/p/lz4/\n\nLicense: New BSD License\nhttp://www.opensource.org/licenses/bsd-license.php\n\n-----------------------------------------------------------\n\nZLIB compression\n\nCopyright (C) 1995-2012 Jean-loup Gailly and Mark Adler\n\nSource code: http://www.zlib.net/\n\nLicense: below.\n\n-----------------------------------------------------------\n\nD3.js\n\nCopyright (c) 2012, Michael Bostock\n\nLicense: New BSD License (below)\nhttp://opensource.org/licenses/BSD-3-Clause\n\n-----------------------------------------------------------\n\nc3p0 - JDBC3 Connection and Statement Pooling\n\nCopyright (c) 2012 Machinery For Change, Inc.\n\nSource code: http://www.mchange.com/projects/c3p0/index.html\n\nLicense: Lesser GNU Public License (LGPL)\nhttp://www.gnu.org/licenses/lgpl.html\n\n-----------------------------------------------------------\n\nHibernate\n\nSource code: http://www.hibernate.org/\n\nLicense: Lesser GNU Public License (LGPL) v2.1\nhttp://www.gnu.org/licenses/old-licenses/lgpl-2.1.html\n\n-----------------------------------------------------------\n\nTrove\n\nSource code: https://bitbucket.org/trove4j/trove\n\nLicense: Lesser GNU Public License (LGPL) v2.1\nhttp://www.gnu.org/licenses/old-licenses/lgpl-2.1.html\n\n-----------------------------------------------------------\n\nSOCI\n\nSource code: 
http://soci.sourceforge.net/\n\nLicense: Boost Software License\nhttp://www.boost.org/LICENSE_1_0.txt\n\n-----------------------------------------------------------\n\nPCRE\n\nCopyright (c) 2007-2012 Google Inc.\nCopyright (c) 2009-2012 Zoltan Herczeg\nCopyright (c) 1997-2012 University of Cambridge\n\nSource code: http://www.pcre.org/\n\nLicense: New BSD License\nhttp://www.opensource.org/licenses/bsd-license.php\n\n-----------------------------------------------------------\n\n\"react@16.14.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react@17.0.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react@18.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"prop-types@15.7.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/prop-types\",\n \"licenseUrl\": \"https://github.com/facebook/prop-types/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"prop-types@15.8.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/prop-types\",\n \"licenseUrl\": \"https://github.com/facebook/prop-types/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-bootstrap@0.32.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/react-bootstrap/react-bootstrap\",\n \"licenseUrl\": \"https://github.com/react-bootstrap/react-bootstrap/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"rxjs@5.5.12\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/reactivex/rxjs\",\n \"licenseUrl\": \"https://github.com/ReactiveX/rxjs/raw/master/LICENSE.txt\",\n\n-----------------------------------------------------------\n\n\"classnames@2.2.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/JedWatson/classnames\",\n \"licenseUrl\": \"https://github.com/JedWatson/classnames/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"classnames@2.3.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/JedWatson/classnames\",\n \"licenseUrl\": \"https://github.com/JedWatson/classnames/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-redux@7.2.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/react-redux\",\n \"licenseUrl\": \"https://github.com/reduxjs/react-redux/raw/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"react-redux@7.2.8\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/react-redux\",\n \"licenseUrl\": \"https://github.com/reduxjs/react-redux/raw/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"redux-form@8.3.8\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/redux-form/redux-form\",\n \"licenseUrl\": 
\"https://github.com/redux-form/redux-form/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"immutable@3.8.1\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/facebook/immutable-js\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/immutable-js/immutable-js/e96d73f7e1fbeff00d03b09aa4352e04de61abb3/LICENSE\",\n\n-----------------------------------------------------------\n\n\"moment@2.29.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/moment/moment\",\n \"licenseUrl\": \"https://github.com/moment/moment/raw/develop/LICENSE\",\n\n-----------------------------------------------------------\n\n\"graphql@14.7.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/graphql/graphql-js\",\n \"licenseUrl\": \"https://github.com/graphql/graphql-js/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"graphql@16.6.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/graphql/graphql-js\",\n \"licenseUrl\": \"https://github.com/graphql/graphql-js/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"lodash-es@14.7.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/lodash/lodash\",\n \"licenseUrl\": \"https://github.com/lodash/lodash/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-dom@16.14.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-dom@17.0.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-dom@18.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"antlr4@4.8.0\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/antlr/antlr4\",\n \"licenseUrl\": \"https://github.com/antlr/antlr4/raw/master/LICENSE.txt\",\n\n-----------------------------------------------------------\n\n\"react-router@6.15.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/remix-run/react-router\",\n \"licenseUrl\": \"https://github.com/remix-run/react-router/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"react-router-dom@4.2.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/remix-run/react-router\",\n \"licenseUrl\": \"https://github.com/remix-run/react-router/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"react-router-dom@5.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/remix-run/react-router\",\n \"licenseUrl\": \"https://github.com/remix-run/react-router/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"react-router-dom@6.15.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/remix-run/react-router\",\n \"licenseUrl\": \"https://github.com/remix-run/react-router/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"fbjs@0.8.16\"\n\n \"licenses\": \"MIT\",\n 
\"repository\": \"https://github.com/facebook/fbjs\",\n \"licenseUrl\": \"https://github.com/facebook/fbjs/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"redux@4.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/redux\",\n \"licenseUrl\": \"https://github.com/reduxjs/redux/raw/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"react-highcharts@16.1.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/kirjs/react-highcharts\",\n \"licenseUrl\": \"https://github.com/kirjs/react-highcharts/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"lodash@4.17.21\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/lodash/lodash\",\n \"licenseUrl\": \"https://github.com/lodash/lodash/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"highcharts@7.2.2\"\n\n \"licenses\": \"https://www.highcharts.com/license\",\n \"repository\": \"https://github.com/highcharts/highcharts-dist\",\n\n-----------------------------------------------------------\n\n\"highcharts@9.1.0\"\n\n \"licenses\": \"https://www.highcharts.com/license\",\n \"repository\": \"https://github.com/highcharts/highcharts-dist\",\n\n-----------------------------------------------------------\n\n\"pegjs@0.10.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/pegjs/pegjs\",\n \"licenseUrl\": \"https://github.com/pegjs/pegjs/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-overlays@0.7.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/react-bootstrap/react-overlays\",\n \"licenseUrl\": \"https://github.com/react-bootstrap/react-overlays/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"jquery@3.6.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/jquery/jquery\",\n \"licenseUrl\": \"https://github.com/jquery/jquery/raw/main/LICENSE.txt\",\n\n-----------------------------------------------------------\n\n\"react-bootstrap-typeahead@1.4.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ericgio/react-bootstrap-typeahead\",\n \"licenseUrl\": \"https://github.com/ericgio/react-bootstrap-typeahead/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"@reduxjs/toolkit@1.5.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/redux-toolkit\",\n \"licenseUrl\": \"https://github.com/reduxjs/redux-toolkit/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\"@reduxjs/toolkit@1.8.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/redux-toolkit\",\n \"licenseUrl\": \"https://github.com/reduxjs/redux-toolkit/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-table@6.11.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/TanStack/table\",\n \"licenseUrl\": \"https://github.com/TanStack/table/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"json-structure-validator@1.2.1\"\n\n \"licenses\": \"none\",\n \"repository\": \"https://github.com/AntJanus/JSON-structure-validator\",\n\n-----------------------------------------------------------\n\n\"keycode@2.2.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/timoxley/keycode\",\n \"licenseUrl\": 
\"https://github.com/timoxley/keycode/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-intl@2.4.0\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/formatjs/formatjs\",\n\n-----------------------------------------------------------\n\n\"intl-messageformat@2.1.0\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/formatjs/formatjs\",\n\n-----------------------------------------------------------\n\n\"intl-messageformat@9.6.16\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/formatjs/formatjs\",\n\n-----------------------------------------------------------\n\n\"rc-slider@8.3.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/react-component/slider\",\n \"licenseUrl\": \"https://github.com/react-component/slider/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"graphql-tag@2.12.6\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/apollographql/graphql-tag\",\n \"licenseUrl\": \"https://github.com/apollographql/graphql-tag/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-notification-system@0.2.15\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/igorprado/react-notification-system\",\n \"licenseUrl\": \"https://github.com/igorprado/react-notification-system/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"history@4.10.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/remix-run/history\",\n \"licenseUrl\": \"https://github.com/remix-run/history/raw/dev/LICENSE\",\n\n-----------------------------------------------------------\n\n\"rc-datetime-picker@4.10.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/AllenWooooo/rc-datetime-picker\",\n \"licenseUrl\": \"https://github.com/AllenWooooo/rc-datetime-picker/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"rc-tooltip@3.4.9\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/react-component/tooltip\",\n \"licenseUrl\": \"https://github.com/react-component/tooltip/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-addons-shallow-compare@15.6.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-router-bootstrap@0.25.0\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/react-bootstrap/react-router-bootstrap\",\n \"licenseUrl\": \"https://github.com/react-bootstrap/react-router-bootstrap/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"redux-thunk@2.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/redux-thunk\",\n \"licenseUrl\": \"https://github.com/reduxjs/redux-thunk/raw/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"apollo-boost@0.1.28\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/apollographql/apollo-client\",\n \"licenseUrl\": \"https://github.com/apollographql/apollo-client/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"redux-observable@0.18.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": 
\"https://github.com/redux-observable/redux-observable\",\n \"licenseUrl\": \"https://github.com/redux-observable/redux-observable/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-router-redux@4.0.8\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reactjs/react-router-redux\",\n \"licenseUrl\": \"https://github.com/reactjs/react-router-redux/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"intl@1.2.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/andyearnshaw/Intl.js\",\n \"licenseUrl\": \"https://github.com/andyearnshaw/Intl.js/raw/master/LICENSE.txt\",\n\n-----------------------------------------------------------\n\n\"babel-polyfill@6.26.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/babel/babel/tree/master/packages/babel-polyfill\",\n \"licenseUrl\": \"https://github.com/babel/babel/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"@babel-runtime@7.21.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/babel/babel/tree/main/packages/babel-runtime\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/babel/babel/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"whatwg-fetch@2.0.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/github/fetch\",\n \"licenseUrl\": \"https://github.com/github/fetch/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-text-mask@5.0.2\"\n\n \"licenses\": \"Unlicense\",\n \"repository\": \"https://github.com/text-mask/text-mask\",\n \"licenseUrl\": \"https://github.com/text-mask/text-mask/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-select@1.3.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/JedWatson/react-select/tree/master/packages/react-select\",\n \"licenseUrl\": \"https://github.com/JedWatson/react-select/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-dock@0.2.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/redux-devtools\",\n \"licenseUrl\": \"https://github.com/reduxjs/redux-devtools/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"css-toggle-switch@4.1.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ghinda/css-toggle-switch\",\n \"licenseUrl\": \"https://github.com/ghinda/css-toggle-switch/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"dompurify@2.3.8\"\n\n \"licenses\": \"MPL-2.0 OR Apache-2.0\",\n \"repository\": \"https://github.com/cure53/DOMPurify\",\n \"licenseUrl\": \"https://github.com/cure53/DOMPurify/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-copy-to-clipboard@5.0.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/nkbt/react-copy-to-clipboard\",\n \"licenseUrl\": \"https://github.com/nkbt/react-copy-to-clipboard/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-duallist@1.1.6\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/jyotirmaybanerjee/react-duallist\",\n \"licenseUrl\": \"https://github.com/jyotirmaybanerjee/react-duallist/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"redux-devtools-extension@2.13.2\"\n\n 
\"licenses\": \"MIT\",\n \"repository\": \"https://github.com/zalmoxisus/redux-devtools-extension\",\n \"licenseUrl\": \"https://github.com/zalmoxisus/redux-devtools-extension/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"axios-mock-adapter@1.19.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ctimmerm/axios-mock-adapter\",\n \"licenseUrl\": \"https://github.com/ctimmerm/axios-mock-adapter/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"axios@0.21.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/axios/axios\",\n \"licenseUrl\": \"https://github.com/axios/axios/raw/v0.x/LICENSE\",\n\n-----------------------------------------------------------\n\n\"axios@0.27.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/axios/axios\",\n \"licenseUrl\": \"https://github.com/axios/axios/raw/v0.x/LICENSE\",\n\n-----------------------------------------------------------\n\n\"codemirror@5.62.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/codemirror/basic-setup\",\n \"licenseUrl\": \"https://github.com/codemirror/basic-setup/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"codemirror@5.65.12\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/codemirror/basic-setup\",\n \"licenseUrl\": \"https://github.com/codemirror/basic-setup/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"grommet-icons@4.9.0\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet-icons\",\n \"licenseUrl\": \"https://github.com/grommet/grommet-icons/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"grommet-icons@4.10.0\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet-icons\",\n \"licenseUrl\": \"https://github.com/grommet/grommet-icons/raw/master/LICENSE\",\n-----------------------------------------------------------\n\n\"grommet@2.25.1\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet\",\n \"licenseUrl\": \"https://github.com/grommet/grommet/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"grommet@2.31.0\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet\",\n \"licenseUrl\": \"https://github.com/grommet/grommet/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"highcharts-react-official@3.0.0\"\n\n \"licenses\": \"https://github.com/highcharts/highcharts-react/raw/master/LICENSE\",\n \"repository\": \"https://github.com/highcharts/highcharts-react\",\n\n-----------------------------------------------------------\n\n\"react-codemirror2@7.2.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/scniro/react-codemirror2\",\n \"licenseUrl\": \"https://github.com/scniro/react-codemirror2/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"styled-components@5.3.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/styled-components/styled-components\",\n \"licenseUrl\": \"https://github.com/styled-components/styled-components/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"styled-components@5.3.9\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/styled-components/styled-components\",\n 
\"licenseUrl\": \"https://github.com/styled-components/styled-components/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"grommet-theme-hpe@3.2.1\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet-theme-hpe\",\n \"licenseUrl\": \"https://github.com/grommet/grommet-theme-hpe/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"uuid@8.3.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/uuidjs/uuid\",\n \"licenseUrl\": \"https://github.com/uuidjs/uuid/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"uuid@9.0.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/uuidjs/uuid\",\n \"licenseUrl\": \"https://github.com/uuidjs/uuid/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"use-debounce@7.0.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/xnimorz/use-debounce\",\n \"licenseUrl\": \"https://github.com/xnimorz/use-debounce/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"deep-equal@1.0.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/node-deep-equal\",\n \"licenseUrl\": \"https://github.com/inspect-js/node-deep-equal/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-d3@0.4.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/esbullington/react-d3\",\n \"licenseUrl\": \"https://github.com/esbullington/react-d3/raw/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"react-immutable-proptypes@2.1.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/HurricaneJames/react-immutable-proptypes\",\n \"licenseUrl\": \"https://github.com/HurricaneJames/react-immutable-proptypes/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"swagger-ui-dist@3.23.11\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/swagger-api/swagger-ui\",\n \"licenseUrl\": \"https://github.com/swagger-api/swagger-ui/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"swagger-ui-themes@3.0.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ostranme/swagger-ui-themes\",\n\n-----------------------------------------------------------\n\n\"deepmerge@4.3.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/TehShrike/deepmerge\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/TehShrike/deepmerge/master/license.txt\",\n\n-----------------------------------------------------------\n\n\"exenv@1.2.2\"\n\n \"licenses\": \"BSD\",\n \"repository\": \"https://github.com/JedWatson/exenv\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/JedWatson/exenv/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"grommet-styles@0.2.0\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet-styles\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/grommet/grommet-styles/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"hoist-non-react-statics@3.3.2\"\n\n \"licenses\": \"BSD\",\n \"repository\": \"https://github.com/mridgway/hoist-non-react-statics\",\n \"licenseUrl\": 
\"https://raw.githubusercontent.com/mridgway/hoist-non-react-statics/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"object-assign@4.1.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/sindresorhus/object-assign\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/sindresorhus/object-assign/main/license\",\n\n-----------------------------------------------------------\n\n\"react-fast-compare@3.2.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/FormidableLabs/react-fast-compare\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/FormidableLabs/react-fast-compare/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-is@18.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/facebook/react/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-joyride@2.5.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/gilbarbara/react-joyride\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/gilbarbara/react-joyride/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-proptype-conditional-require@1.0.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/beefancohen/react-proptype-conditional-require\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/beefancohen/react-proptype-conditional-require/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-query@3.39.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/TanStack/query\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/TanStack/query/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-side-effect@2.1.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/gaearon/react-side-effect\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/gaearon/react-side-effect/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"scheduler@0.23.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"scroll@3.0.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/michaelrhodes/scroll\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/michaelrhodes/scroll/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"scrollparent@2.0.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/olahol/scrollparent.js\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/olahol/scrollparent.js/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"shallowequal@1.1.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/dashed/shallowequal\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/dashed/shallowequal/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"@mswjs/cookies@0.2.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/mswjs/cookies\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/mswjs/cookies/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"@open-draft/until@1.0.3\"\n\n \"licenses\": \"MIT\",\n 
\"repository\": \"https://github.com/open-draft/until\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/open-draft/until/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"@xmldom/xmldom@0.8.7\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/xmldom/xmldom\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/xmldom/xmldom/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"available-typed-arrays@1.0.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/available-typed-arrays\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/available-typed-arrays/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"base64-js@1.5.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/beatgammit/base64-js\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/beatgammit/base64-js/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"buffer@6.0.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/feross/buffer\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/feross/buffer/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"call-bind@1.0.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ljharb/call-bind\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/ljharb/call-bind/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"cookie@0.4.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/jshttp/cookie\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/jshttp/cookie/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"debug@4.3.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/debug-js/debug\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/debug-js/debug/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"esprima@4.0.1\"\n\n \"licenses\": \"BSD-2-Clause\",\n \"repository\": \"https://github.com/jquery/esprima\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/jquery/esprima/main/LICENSE.BSD\",\n\n-----------------------------------------------------------\n\n\"events@3.3.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/browserify/events\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/browserify/events/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"for-each@0.3.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/Raynos/for-each\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/Raynos/for-each/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"function-bind@1.1.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/Raynos/function-bind\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/Raynos/function-bind/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"get-intrinsic@1.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ljharb/get-intrinsic\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/ljharb/get-intrinsic/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"gopd@1.0.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ljharb/gopd\",\n \"licenseUrl\": 
\"https://raw.githubusercontent.com/ljharb/gopd/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"has-symbols@1.0.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/has-symbols\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/has-symbols/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"has-tostringtag@1.0.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/has-tostringtag\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/has-tostringtag/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"has@1.0.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/tarruda/has\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/tarruda/has/master/LICENSE-MIT\",\n\n-----------------------------------------------------------\n\n\"headers-polyfill@3.1.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/mswjs/headers-polyfill\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/mswjs/headers-polyfill/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"ieee754@1.2.1\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/feross/ieee754\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/feross/ieee754/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"inherits@2.0.4\"\n\n \"licenses\": \"ISC\",\n \"repository\": \"https://github.com/isaacs/inherits\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/isaacs/inherits/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"is-arguments@1.1.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/is-arguments\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/is-arguments/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"is-callable@1.2.7\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/is-callable\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/is-callable/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"is-generator-function@1.0.10\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/is-generator-function\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/is-generator-function/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"is-node-process@1.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/mswjs/is-node-process\",\n\n-----------------------------------------------------------\n\n\"is-typed-array@1.1.10\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/is-typed-array\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/is-typed-array/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"js-levenshtein@1.1.6\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/gustf/js-levenshtein\",\n \"licenseUrl\": \"https://github.com/gustf/js-levenshtein/blob/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"js-yaml@3.14.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/nodeca/js-yaml\",\n \"licenseUrl\": 
\"https://raw.githubusercontent.com/nodeca/js-yaml/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"ms@2.1.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/vercel/ms\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/vercel/ms/master/license.md\",\n\n-----------------------------------------------------------\n\n\"msw@1.2.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/mswjs/msw\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/mswjs/msw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"node-fetch@2.6.9\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/node-fetch/node-fetch\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/node-fetch/node-fetch/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"outvariant@1.4.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/open-draft/outvariant\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/open-draft/outvariant/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"path-to-regexp@6.2.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/pillarjs/path-to-regexp\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/pillarjs/path-to-regexp/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"set-cookie-parser@2.6.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/nfriedly/set-cookie-parser\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/nfriedly/set-cookie-parser/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"strict-event-emitter@0.4.6\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/open-draft/strict-event-emitter\",\n\n-----------------------------------------------------------\n\n\"util@0.12.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/browserify/node-util\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/browserify/node-util/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"web-encoding@1.1.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/gozala/web-encoding\",\n\n-----------------------------------------------------------\n\n\"which-typed-array@1.1.9\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/which-typed-array\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/which-typed-array/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-toastify@9.1.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/fkhadra/react-toastify\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/fkhadra/react-toastify/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-syntax-highlighter@15.5.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/react-syntax-highlighter/react-syntax-highlighter\",\n \"licenseUrl\": \"https://github.com/react-syntax-highlighter/react-syntax-highlighter/blob/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"character-entities@1.2.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/character-entities\",\n \"licenseUrl\": 
\"https://github.com/wooorm/character-entities/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"character-entities-legacy@1.1.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/character-entities-legacy\",\n \"licenseUrl\": \"https://github.com/wooorm/character-entities-legacy/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"character-reference-invalid@1.1.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/character-reference-invalid\",\n \"licenseUrl\": \"https://github.com/wooorm/character-reference-invalid/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"comma-separated-tokens@1.0.8\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/comma-separated-tokens\",\n \"licenseUrl\": \"https://github.com/wooorm/comma-separated-tokens/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"hast-util-parse-selector@2.2.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/syntax-tree/hast-util-parse-selector\",\n \"licenseUrl\": \"https://github.com/syntax-tree/hast-util-parse-selector/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"hastscript@6.0.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/syntax-tree/hastscript\",\n \"licenseUrl\": \"https://github.com/syntax-tree/hastscript/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"is-alphabetical@1.0.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/is-alphabetical\",\n \"licenseUrl\": \"https://github.com/wooorm/is-alphabetical/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"is-alphanumerical@1.0.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/is-alphanumerical\",\n \"licenseUrl\": \"https://github.com/wooorm/is-alphanumerical/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"is-decimal@1.0.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/is-decimal\",\n \"licenseUrl\": \"https://github.com/wooorm/is-decimal/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"is-hexadecimal@1.0.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/is-hexadecimal\",\n \"licenseUrl\": \"https://github.com/wooorm/is-hexadecimal/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"parse-entities@2.0.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/parse-entities\",\n \"licenseUrl\": \"https://github.com/wooorm/parse-entities/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"prismjs@1.29.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/PrismJS/prism\",\n \"licenseUrl\": \"https://github.com/PrismJS/prism/blob/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"property-information@5.6.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/property-information\",\n \"licenseUrl\": \"https://github.com/wooorm/property-information/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"refractor@3.6.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/refractor\",\n 
\"licenseUrl\": \"https://github.com/wooorm/refractor/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"space-separated-tokens@1.1.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/wooorm/space-separated-tokens\",\n \"licenseUrl\": \"https://github.com/wooorm/space-separated-tokens/blob/main/license\",\n\n-----------------------------------------------------------\n\n\"xtend@4.0.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/Raynos/xtend\",\n \"licenseUrl\": \"https://github.com/Raynos/xtend/blob/master/LICENSE\",\n\n-----------------------------------------------------------\n\ncommons-beanutils\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\ncommons-configuration\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\njoda-time\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\njna\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\ncommons-lang\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nehcache-core\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nannotations\n\nLicense: GNU Lesser Public License\nhttp://www.gnu.org/licenses/lgpl.html\n\n-----------------------------------------------------------\n\nhazelcast\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\njersey-server\n\nLicense: CDDL+GPL License\nhttp://glassfish.java.net/public/CDDL+GPL_1_1.html\n\n-----------------------------------------------------------\n\nlibpam4j\n\nLicense: The MIT license\nhttp://www.opensource.org/licenses/mit-license.php\n\n-----------------------------------------------------------\n\nlombok\n\nLicense: The MIT License\nhttps://projectlombok.org/LICENSE\n\n-----------------------------------------------------------\n\nspring-security-core\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nspring-security-kerberos-core\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nswagger-annotations\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Ranger\n\nCopyright 
2014-2022 The Apache Software Foundation\n\nLicense: Apache License Version 2.0, January 2004\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\nApache NiFi\n\nCopyright 2014-2022 The Apache Software Foundation\n\nLicense: Apache License Version 2.0, January 2004 \nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\nApache Airflow\n\nCopyright 2016-2021 The Apache Software Foundation\n\nLicense: Apache License Version 2.0, January 2004 \nhttp://www.apache.org/licenses/LICENSE-2.0\n\n\n=========================================================== Hadoop\n\nCopyright (c) 2011 The Apache Software Foundation.\n\nSource code: http://hadoop.apache.org/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Hive\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Zeppelin\n\nCopyright (c) 2015 - 2016 The Apache Software Foundation\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Tez\n\nCopyright (c) 2016 The Apache Software Foundation\n\nSource code: git://git.apache.org/tez.git\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache HBase\n\nSource code: http://hbase.apache.org/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nAsync HBase\nCopyright (C) 2010-2012 The Async HBase Authors. 
All rights reserved.\n\nNew BSD License\n\nhttp://opensource.org/licenses/BSD-3-Clause\n\n-----------------------------------------------------------\n\nApache Thrift\n\nCopyright (c) 2006-2010 The Apache Software Foundation.\n\nSource code: http://incubator.apache.org/thrift/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache RocksDB\n\nCopyright (c) 2004 The Apache Software Foundation.\n\nSource code: https://github.com/facebook/rocksdb\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Kafka\n\nSource code: https://github.com/apache/kafka\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nElasticsearch\n\nCopyright 2009-2016 Elasticsearch\n\nSource code: https://github.com/elastic/elasticsearch\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\nGrafana\n\nCopyright 2012-2013 Elasticsearch BV\n\nSource code: https://github.com/grafana/grafana\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\nKibana\n\nCopyright 2012-2016 Elasticsearch BV\n\nSource code: https://github.com/elastic/kibana\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\ncollectd\n\nCopyright (C) 1989, 1991 Free Software Foundation\n\nSource code: https://github.com/collectd/collectd\n\nLicense: LGPL 2\nhttps://github.com/collectd/collectd/blob/master/COPYING\n\n-----------------------------------------------------------\n\nfluentd\n\nCopyright (C) 2011 FURUHASHI Sadayuki\n\nSource code: https://github.com/fluent/fluentd\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\nMySQL Connector/J\n\nCopyright (C) 1989, 1991 Free Software Foundation\n\nSource code: https://github.com/mysql/mysql-connector-j\n\nLicense: LGPL 2\nhttps://github.com/mysql/mysql-connector-j/blob/release/5.1/COPYING\n\n-----------------------------------------------------------\n\nGanesha\n\nCopyright (C) 2007 Free Software Foundation, Inc.\n\nSource code: https://github.com/nfs-ganesha/nfs-ganesha\n\nLicense: LGPL 3\nhttps://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/LICENSE.txt\n\n-----------------------------------------------------------\n\nMinio\n\nCopyright (c) 2004, The Apache Software Foundation\n\nMinIO Client (C) 2014-2020 MinIO, Inc.\n\nThis product includes software developed at MinIO, Inc.\n(https://min.io/).\n\nThe MinIO project contains unmodified/modified subcomponents too with\nseparate copyright notices and license terms. 
Your use of the source\ncode for these subcomponents is subject to the terms and conditions\nof the following licenses.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\ngRPC\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/grpc/grpc\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\n\n\nKafka-connect-jdbc\n Copyright (c) 2015 Confluent Inc.\nThe following libraries are included in packaged versions of this project:\n\n* SQLite JDBC Driver\n * COPYRIGHT: Copyright Taro L. Saito, David Crenshaw\n * LICENSE: licenses/LICENSE.apache2.txt\n * NOTICE: licenses/NOTICE.sqlite-jdbc.txt\n * HOMEPAGE: https://github.com/xerial/sqlite-jdbc\n\n* PostgreSQL JDBC Driver\n * COPYRIGHT: Copyright 1997-2011, PostgreSQL Global Development Group\n * LICENSE: licenses/LICENSE.bsd.txt\n * HOMEPAGE: https://jdbc.postgresql.org/\n\n* MariaDB JDBC Driver\n * COPYRIGHT: Copyright 2012 Monty Program Ab., 2009-2011, Marcus Eriksson\n * LICENSE: licenses/LICENSE.lgpl.txt\n * HOMEPAGE: https://mariadb.com/kb/en/mariadb/about-mariadb-connector-j/\n-----------------------------------------------------------\n\nkafka-connect-hdfs\nCopyright (c) 2015 Confluent Inc.\n-----------------------------------------------------------\n\n\nkafka-rest\nConfluent Community License Agreement Version 1.0\n\n\n-----------------------------------------------------------\n\nschema-registry\n\nThe project is licensed under the Confluent Community License, except for client\nlibs, which is under the Apache 2.0 license.\n\nSee LICENSE file in each subfolder for detailed license agreement.\n\n-----------------------------------------------------------\n\nKSQL\n\nConfluent Community License Agreement Version 1.0\n\nThe project is licensed under the Confluent Community License.\n\nApache, Apache Kafka, Kafka, and associated open source project names are trademarks of the Apache Software Foundation.\n\n-----------------------------------------------------------\n\nrest-utils\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n\nThe following libraries are included in packaged versions of this project:\n\n* ClassMate\n * COPYRIGHT: Copyright 2010 The Apache Software Foundation\n * LICENSE: licenses/LICENSE.apache2.txt\n * HOMEPAGE: https://github.com/cowtowncoder/java-classmate\n\n* Confluent Common\n * COPYRIGHT: Confluent Inc.\n * LICENSE: licenses/LICENSE.apache2.txt\n * HOMEPAGE: https://github.com/confluentinc/common\n\n* Hamcrest\n * COPYRIGHT: Copyright (c) 2000-2006, www.hamcrest.org\n * LICENSE: licenses/LICENSE.bsd.txt\n * HOMEPAGE: http://hamcrest.org/\n\n* Hibernate\n * COPYRIGHT: licenses/COPYRIGHT.hibernate.txt\n * LICENSE: licenses/LICENSE.apache2.txt\n * HOMEPAGE: http://hibernate.org/validator/\n\n* HK2\n * COPYRIGHT: Copyright (c) 2010-2014 Oracle and/or its affiliates. 
All rights reserved.\n * LICENSE: licenses/LICENSE.cddl+gpl2.html\n * HOMEPAGE: https://hk2.java.net\n\n* Jackson annotations\n * LICENSE: licenses/LICENSE.jackson-annotations.txt (Apache 2)\n * HOMEPAGE: http://github.com/FasterXML/jackson\n\n* Jackson core\n * LICENSE: licenses/LICENSE.jackson-core.txt (Apache 2)\n * NOTICE: licenses/NOTICE.jackson-core.txt\n * HOMEPAGE: http://github.com/FasterXML/jackson\n\n* Jackson databind\n * LICENSE: licenses/LICENSE.jackson-databind.txt (Apache 2)\n * NOTICE: licenses/NOTICE.jackson-databind.txt\n * HOMEPAGE: http://github.com/FasterXML/jackson\n\n* Jackson jaxrs-json-provider\n * LICENSE: licenses/LICENSE.jackson-core.txt (Apache 2)\n * NOTICE: licenses/NOTICE.jackson-core.txt\n * HOMEPAGE: http://github.com/FasterXML/jackson\n\n* Javassist\n * COPYRIGHT: Copyright (C) 1999- by Shigeru Chiba, All rights reserved.\n * LICENSE: licenses/LICENSE.javassist.txt (MPL, LGPL, Apache 2)\n * HOMEPAGE: http://www.javassist.org\n\n* javax.annotation-api, javax.el, javax.el-api, javax.inject, javax.servlet, javax.ws.rs-api, javax.validation\n * COPYRIGHT: Copyright Oracle\n * LICENSE: licenses/LICENSE.cddl+gpl2.html\n\n* JBoss Logging\n * COPYRIGHT: Copyright 2014 Red Hat, Inc.\n * LICENSE: licenses/LICENSE.apache2.txt\n * HOMEPAGE: http://www.jboss.org\n\n* Jersey\n * LICENSE: licenses/LICENSE.cddl+gpl2.html\n * HOMEPAGE: http://jersey.java.net\n\n* Jetty\n * COPYRIGHT: Copyright Mort Bay Consulting Pty Ltd unless otherwise noted\n * LICENSE: licenses/LICENSE.apache2.txt, licenses/LICENSE.epl.html\n * NOTICE: licenses/NOTICE.jetty.txt\n * HOMEPAGE: http://eclipse.org/jetty/\n\n* JUnit\n * LICENSE: licenses/LICENSE.epl.txt\n * NOTICE: licenses/NOTICE.junit.txt\n * HOMEPAGE: http://junit.org/\n\n\n\n-----------------------------------------------------------\n\n\n\nKStreams\n\nCopyright (c) 2004, The Apache Software Foundation\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nHttpComponents\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: http://hc.apache.org\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nQuartz-Scheduler Hazelcast Job Store\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/FlavioF/quartz-scheduler-hazelcast-jobstore\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nQuartz\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/quartz-scheduler/quartz\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nAWS JAVA-SDK\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://aws.amazon.com/sdk-for-java\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nZIP4J\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: http://www.lingala.net/zip4j/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nArgs4j\n\nCopyright (c) 2013, Kohsuke Kawaguchi and other contributors\n\nSource code: 
https://github.com/kohsuke/args4j\n\nLicense: MIT\nhttp://www.opensource.org/licenses/mit-license.php\n\n-----------------------------------------------------------\n\nCurator\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://curator.apache.org/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nHazelcast Discovery Plugin for Apache ZooKeeper\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/hazelcast/hazelcast-zookeeper\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nIntel(R) Intelligent Storage Acceleration Library\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/01org/isa-l\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nIntel(R) Intelligent Storage Acceleration Library Crypto Version\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/01org/isa-l_crypto\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nMapR-DB Client Driver for Python Application\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/mapr/maprdb-python-client\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nMapR-DB Client Driver for Node.JS Application\n\nCopyright (c) 2004, The Apache Software Foundation\n\nSource code: https://github.com/mapr/maprdb-node-client\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nMesosphere Mesos-DNS\n\nCopyright (c) 2015, The Apache Software Foundation\n\nSource code: https://github.com/mesosphere/mesos-dns\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nJava Library for Processing JSON\n\nCopyright (c) 2015, The Apache Software Foundation\n\nSource Code: https://github.com/FasterXML\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\nhttp://wiki.fasterxml.com/JacksonLicensing\n\n-----------------------------------------------------------\n\nSpring Framework\n\nSource code: https://github.com/spring-projects\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nSpring Shell\n\nSource code: https://github.com/spring-projects\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nTCMalloc\n\nNew BSD License\n\nhttp://opensource.org/licenses/BSD-3-Clause\n\n-----------------------------------------------------------\n\nAntlr4 Runtime\n\nSource Code: https://github.com/antlr/antlr4/\n\nLicense: BSD License\nhttp://www.antlr.org/license.html\n\n-----------------------------------------------------------\n\nAOP Alliance\n\nSource Code: http://sourceforge.net/p/aopalliance/code/\n\nLicense: Public 
Domain\n\n-----------------------------------------------------------\n\nASM Java Bytecode Manipulation and Analysis Framework\n\nSource Code: http://forge.ow2.org/plugins/scmsvn/index.php?group_id=23\n\nLicense: BSD License\nhttp://forge.ow2.org/projects/asm/\n\n-----------------------------------------------------------\n\nJLine (Java Library for Handling Console Input v. 2)\n\nSource Code: https://github.com/jline/jline2\n\nLicense: BSD License\n\nhttps://github.com/jline/jline2/blob/master/LICENSE.txt\n\n-----------------------------------------------------------\n\nOpenTSDB\n\nLGPL v2.1\nhttps://github.com/OpenTSDB/opentsdb/blob/master/COPYING.LESSER\n\n-----------------------------------------------------------\n\nApache Spark\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nSnappy 1.0.5\nNew BSD License\n\nhttp://opensource.org/licenses/BSD-3-Clause\n\n-----------------------------------------------------------\n\nHue\n\nCopyright (c) Cloudera\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nAnsible\n\nGPL\nhttps://github.com/ansible/ansible/blob/devel/COPYING\n\n-----------------------------------------------------------\n\nApache Drill\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\ngperftools 2.0\nNew BSD License\n\nhttp://opensource.org/licenses/BSD-3-Clause\n\n-----------------------------------------------------------\n\nApache ZooKeeper\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nSource code: http://zookeeper.apache.org\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nOpen JSON Application Interface (OJAI)\n\nCopyright (c) 2015 The Apache Software Foundation.\n\nSource code: https://github.com/ojai/ojai\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Commons\n\nCopyright (c) 2003-2007 The Apache Software Foundation.\n\nSource code and additional copyright: http://commons.apache.org/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nGoogle Collections (Guava)\n\nCopyright (c) 2007 Google Inc.\n\nSource code: http://code.google.com/p/guava-libraries/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Tomcat\n\nCopyright (c) 1999-2011 The Apache Software Foundation.\n\nSource code: http://tomcat.apache.org\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nJetty Web Container\n\nCopyright (c) 1995-2009 Mort Bay Consulting Pty Ltd.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nOpen Json\n\nAndroid JSON library\nCopyright (C) 2010 The Android Open Source Project\n\nSource code: https://github.com/tdunning/open-json\nLicense: Apache License, Version 
2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nJUnit\n\nLicense: Common Public License - v 1.0\nhttp://www.junit.org/license\n\n-----------------------------------------------------------\n\nlog4j\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nJavaMail\n\nCopyright (c) 1997-2011, Oracle and/or its affiliates.\n\nSource code: http://www.oracle.com/technetwork/java/index-138643.html\n\nLicense: Oracle Corporation (\"ORACLE\") ENTITLEMENT for SOFTWARE\nSee below.\n\n-----------------------------------------------------------\n\nProtocol Buffers\n\nCopyright (c) 2008 Google Inc.\n\nSource code: http://protobuf.googlecode.com\n\nLicense: New BSD License\nhttp://www.opensource.org/licenses/bsd-license.php\n\n-----------------------------------------------------------\n\nuuid - DCE compatible Universally Unique Identifier library\n\nCopyright (C) 1996, 1997, 1998 Theodore Ts'o.\n\nLicense: below.\n\n-----------------------------------------------------------\n\nMurmurHash\n\nSource code: http://code.google.com/p/smhasher/\n\nLicense: MIT License\nhttp://www.opensource.org/licenses/mit-license.php\n\n-----------------------------------------------------------\n\nEval - A Simple Expression Evaluator for Java\n\nSource code: http://java.net/projects/eval/pages/Home\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nGuava Release 11.0.1\n\nSource code: http://code.google.com/p/guava-libraries/\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nsuEXEC - Apache HTTP Server Version 2.0\n\nSource code: http://httpd.apache.org/docs/2.0/suexec.html\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nLZ4 compression\n\nCopyright (C) 2011-2012, Yann Collet.\n\nSource code: http://code.google.com/p/lz4/\n\nLicense: New BSD License\nhttp://www.opensource.org/licenses/bsd-license.php\n\n-----------------------------------------------------------\n\nZLIB compression\n\nCopyright (C) 1995-2012 Jean-loup Gailly and Mark Adler\n\nSource code: http://www.zlib.net/\n\nLicense: below.\n\n-----------------------------------------------------------\n\nD3.js\n\nCopyright (c) 2012, Michael Bostock\n\nLicense: New BSD License (below)\nhttp://opensource.org/licenses/BSD-3-Clause\n\n-----------------------------------------------------------\n\nc3p0 - JDBC3 Connection and Statement Pooling\n\nCopyright (c) 2012 Machinery For Change, Inc.\n\nSource code: http://www.mchange.com/projects/c3p0/index.html\n\nLicense: Lesser GNU Public License (LGPL)\nhttp://www.gnu.org/licenses/lgpl.html\n\n-----------------------------------------------------------\n\nHibernate\n\nSource code: http://www.hibernate.org/\n\nLicense: Lesser GNU Public License (LGPL) v2.1\nhttp://www.gnu.org/licenses/old-licenses/lgpl-2.1.html\n\n-----------------------------------------------------------\n\nTrove\n\nSource code: https://bitbucket.org/trove4j/trove\n\nLicense: Lesser GNU Public License (LGPL) v2.1\nhttp://www.gnu.org/licenses/old-licenses/lgpl-2.1.html\n\n-----------------------------------------------------------\n\nSOCI\n\nSource code: 
http://soci.sourceforge.net/\n\nLicense: Boost Software License\nhttp://www.boost.org/LICENSE_1_0.txt\n\n-----------------------------------------------------------\n\nPCRE\n\nCopyright (c) 2007-2012 Google Inc.\nCopyright (c) 2009-2012 Zoltan Herczeg\nCopyright (c) 1997-2012 University of Cambridge\n\nSource code: http://www.pcre.org/\n\nLicense: New BSD License\nhttp://www.opensource.org/licenses/bsd-license.php\n\n-----------------------------------------------------------\n\n\"react@16.14.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react@17.0.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react@18.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"prop-types@15.7.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/prop-types\",\n \"licenseUrl\": \"https://github.com/facebook/prop-types/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"prop-types@15.8.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/prop-types\",\n \"licenseUrl\": \"https://github.com/facebook/prop-types/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-bootstrap@0.32.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/react-bootstrap/react-bootstrap\",\n \"licenseUrl\": \"https://github.com/react-bootstrap/react-bootstrap/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"rxjs@5.5.12\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/reactivex/rxjs\",\n \"licenseUrl\": \"https://github.com/ReactiveX/rxjs/raw/master/LICENSE.txt\",\n\n-----------------------------------------------------------\n\n\"classnames@2.2.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/JedWatson/classnames\",\n \"licenseUrl\": \"https://github.com/JedWatson/classnames/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"classnames@2.3.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/JedWatson/classnames\",\n \"licenseUrl\": \"https://github.com/JedWatson/classnames/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-redux@7.2.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/react-redux\",\n \"licenseUrl\": \"https://github.com/reduxjs/react-redux/raw/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"react-redux@7.2.8\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/react-redux\",\n \"licenseUrl\": \"https://github.com/reduxjs/react-redux/raw/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"redux-form@8.3.8\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/redux-form/redux-form\",\n \"licenseUrl\": 
\"https://github.com/redux-form/redux-form/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"immutable@3.8.1\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/facebook/immutable-js\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/immutable-js/immutable-js/e96d73f7e1fbeff00d03b09aa4352e04de61abb3/LICENSE\",\n\n-----------------------------------------------------------\n\n\"moment@2.29.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/moment/moment\",\n \"licenseUrl\": \"https://github.com/moment/moment/raw/develop/LICENSE\",\n\n-----------------------------------------------------------\n\n\"graphql@14.7.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/graphql/graphql-js\",\n \"licenseUrl\": \"https://github.com/graphql/graphql-js/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"graphql@16.6.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/graphql/graphql-js\",\n \"licenseUrl\": \"https://github.com/graphql/graphql-js/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"lodash-es@14.7.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/lodash/lodash\",\n \"licenseUrl\": \"https://github.com/lodash/lodash/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-dom@16.14.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-dom@17.0.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-dom@18.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"antlr4@4.8.0\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/antlr/antlr4\",\n \"licenseUrl\": \"https://github.com/antlr/antlr4/raw/master/LICENSE.txt\",\n\n-----------------------------------------------------------\n\n\"react-router-dom@4.2.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/remix-run/react-router\",\n \"licenseUrl\": \"https://github.com/remix-run/react-router/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"react-router-dom@5.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/remix-run/react-router\",\n \"licenseUrl\": \"https://github.com/remix-run/react-router/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"fbjs@0.8.16\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/fbjs\",\n \"licenseUrl\": \"https://github.com/facebook/fbjs/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"redux@4.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/redux\",\n \"licenseUrl\": \"https://github.com/reduxjs/redux/raw/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"react-highcharts@16.1.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": 
\"https://github.com/kirjs/react-highcharts\",\n \"licenseUrl\": \"https://github.com/kirjs/react-highcharts/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"lodash@4.17.21\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/lodash/lodash\",\n \"licenseUrl\": \"https://github.com/lodash/lodash/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"highcharts@7.2.2\"\n\n \"licenses\": \"https://www.highcharts.com/license\",\n \"repository\": \"https://github.com/highcharts/highcharts-dist\",\n\n-----------------------------------------------------------\n\n\"highcharts@9.1.0\"\n\n \"licenses\": \"https://www.highcharts.com/license\",\n \"repository\": \"https://github.com/highcharts/highcharts-dist\",\n\n-----------------------------------------------------------\n\n\"pegjs@0.10.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/pegjs/pegjs\",\n \"licenseUrl\": \"https://github.com/pegjs/pegjs/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-overlays@0.7.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/react-bootstrap/react-overlays\",\n \"licenseUrl\": \"https://github.com/react-bootstrap/react-overlays/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"jquery@3.6.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/jquery/jquery\",\n \"licenseUrl\": \"https://github.com/jquery/jquery/raw/main/LICENSE.txt\",\n\n-----------------------------------------------------------\n\n\"react-bootstrap-typeahead@1.4.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ericgio/react-bootstrap-typeahead\",\n \"licenseUrl\": \"https://github.com/ericgio/react-bootstrap-typeahead/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"@reduxjs/toolkit@1.5.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/redux-toolkit\",\n \"licenseUrl\": \"https://github.com/reduxjs/redux-toolkit/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\"@reduxjs/toolkit@1.8.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/redux-toolkit\",\n \"licenseUrl\": \"https://github.com/reduxjs/redux-toolkit/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-table@6.11.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/TanStack/table\",\n \"licenseUrl\": \"https://github.com/TanStack/table/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"json-structure-validator@1.2.1\"\n\n \"licenses\": \"none\",\n \"repository\": \"https://github.com/AntJanus/JSON-structure-validator\",\n\n-----------------------------------------------------------\n\n\"keycode@2.2.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/timoxley/keycode\",\n \"licenseUrl\": \"https://github.com/timoxley/keycode/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-intl@2.4.0\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/formatjs/formatjs\",\n\n-----------------------------------------------------------\n\n\"intl-messageformat@2.1.0\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": 
\"https://github.com/formatjs/formatjs\",\n\n-----------------------------------------------------------\n\n\"intl-messageformat@9.6.16\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/formatjs/formatjs\",\n\n-----------------------------------------------------------\n\n\"rc-slider@8.3.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/react-component/slider\",\n \"licenseUrl\": \"https://github.com/react-component/slider/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"graphql-tag@2.12.6\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/apollographql/graphql-tag\",\n \"licenseUrl\": \"https://github.com/apollographql/graphql-tag/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-notification-system@0.2.15\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/igorprado/react-notification-system\",\n \"licenseUrl\": \"https://github.com/igorprado/react-notification-system/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"history@4.10.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/remix-run/history\",\n \"licenseUrl\": \"https://github.com/remix-run/history/raw/dev/LICENSE\",\n\n-----------------------------------------------------------\n\n\"rc-datetime-picker@4.10.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/AllenWooooo/rc-datetime-picker\",\n \"licenseUrl\": \"https://github.com/AllenWooooo/rc-datetime-picker/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"rc-tooltip@3.4.9\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/react-component/tooltip\",\n \"licenseUrl\": \"https://github.com/react-component/tooltip/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-addons-shallow-compare@15.6.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-router-bootstrap@0.25.0\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/react-bootstrap/react-router-bootstrap\",\n \"licenseUrl\": \"https://github.com/react-bootstrap/react-router-bootstrap/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"redux-thunk@2.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/redux-thunk\",\n \"licenseUrl\": \"https://github.com/reduxjs/redux-thunk/raw/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"apollo-boost@0.1.28\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/apollographql/apollo-client\",\n \"licenseUrl\": \"https://github.com/apollographql/apollo-client/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"redux-observable@0.18.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/redux-observable/redux-observable\",\n \"licenseUrl\": \"https://github.com/redux-observable/redux-observable/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-router-redux@4.0.8\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reactjs/react-router-redux\",\n \"licenseUrl\": 
\"https://github.com/reactjs/react-router-redux/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"intl@1.2.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/andyearnshaw/Intl.js\",\n \"licenseUrl\": \"https://github.com/andyearnshaw/Intl.js/raw/master/LICENSE.txt\",\n\n-----------------------------------------------------------\n\n\"babel-polyfill@6.26.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/babel/babel/tree/master/packages/babel-polyfill\",\n \"licenseUrl\": \"https://github.com/babel/babel/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"@babel-runtime@7.21.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/babel/babel/tree/main/packages/babel-runtime\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/babel/babel/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"whatwg-fetch@2.0.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/github/fetch\",\n \"licenseUrl\": \"https://github.com/github/fetch/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-text-mask@5.0.2\"\n\n \"licenses\": \"Unlicense\",\n \"repository\": \"https://github.com/text-mask/text-mask\",\n \"licenseUrl\": \"https://github.com/text-mask/text-mask/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-select@1.3.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/JedWatson/react-select/tree/master/packages/react-select\",\n \"licenseUrl\": \"https://github.com/JedWatson/react-select/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-dock@0.2.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/reduxjs/redux-devtools\",\n \"licenseUrl\": \"https://github.com/reduxjs/redux-devtools/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"css-toggle-switch@4.1.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ghinda/css-toggle-switch\",\n \"licenseUrl\": \"https://github.com/ghinda/css-toggle-switch/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"dompurify@2.3.8\"\n\n \"licenses\": \"MPL-2.0 OR Apache-2.0\",\n \"repository\": \"https://github.com/cure53/DOMPurify\",\n \"licenseUrl\": \"https://github.com/cure53/DOMPurify/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-copy-to-clipboard@5.0.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/nkbt/react-copy-to-clipboard\",\n \"licenseUrl\": \"https://github.com/nkbt/react-copy-to-clipboard/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-duallist@1.1.6\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/jyotirmaybanerjee/react-duallist\",\n \"licenseUrl\": \"https://github.com/jyotirmaybanerjee/react-duallist/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"redux-devtools-extension@2.13.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/zalmoxisus/redux-devtools-extension\",\n \"licenseUrl\": \"https://github.com/zalmoxisus/redux-devtools-extension/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"axios-mock-adapter@1.19.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": 
\"https://github.com/ctimmerm/axios-mock-adapter\",\n \"licenseUrl\": \"https://github.com/ctimmerm/axios-mock-adapter/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"axios@0.21.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/axios/axios\",\n \"licenseUrl\": \"https://github.com/axios/axios/raw/v0.x/LICENSE\",\n\n-----------------------------------------------------------\n\n\"axios@0.27.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/axios/axios\",\n \"licenseUrl\": \"https://github.com/axios/axios/raw/v0.x/LICENSE\",\n\n-----------------------------------------------------------\n\n\"codemirror@5.62.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/codemirror/basic-setup\",\n \"licenseUrl\": \"https://github.com/codemirror/basic-setup/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"codemirror@5.65.12\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/codemirror/basic-setup\",\n \"licenseUrl\": \"https://github.com/codemirror/basic-setup/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"grommet-icons@4.9.0\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet-icons\",\n \"licenseUrl\": \"https://github.com/grommet/grommet-icons/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"grommet-icons@4.10.0\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet-icons\",\n \"licenseUrl\": \"https://github.com/grommet/grommet-icons/raw/master/LICENSE\",\n-----------------------------------------------------------\n\n\"grommet@2.25.1\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet\",\n \"licenseUrl\": \"https://github.com/grommet/grommet/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"grommet@2.31.0\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet\",\n \"licenseUrl\": \"https://github.com/grommet/grommet/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"highcharts-react-official@3.0.0\"\n\n \"licenses\": \"https://github.com/highcharts/highcharts-react/raw/master/LICENSE\",\n \"repository\": \"https://github.com/highcharts/highcharts-react\",\n\n-----------------------------------------------------------\n\n\"react-codemirror2@7.2.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/scniro/react-codemirror2\",\n \"licenseUrl\": \"https://github.com/scniro/react-codemirror2/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"styled-components@5.3.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/styled-components/styled-components\",\n \"licenseUrl\": \"https://github.com/styled-components/styled-components/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"styled-components@5.3.9\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/styled-components/styled-components\",\n \"licenseUrl\": \"https://github.com/styled-components/styled-components/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"grommet-theme-hpe@3.2.1\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet-theme-hpe\",\n \"licenseUrl\": 
\"https://github.com/grommet/grommet-theme-hpe/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"uuid@8.3.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/uuidjs/uuid\",\n \"licenseUrl\": \"https://github.com/uuidjs/uuid/raw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"use-debounce@7.0.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/xnimorz/use-debounce\",\n \"licenseUrl\": \"https://github.com/xnimorz/use-debounce/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"deep-equal@1.0.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/node-deep-equal\",\n \"licenseUrl\": \"https://github.com/inspect-js/node-deep-equal/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-d3@0.4.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/esbullington/react-d3\",\n \"licenseUrl\": \"https://github.com/esbullington/react-d3/raw/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"react-immutable-proptypes@2.1.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/HurricaneJames/react-immutable-proptypes\",\n \"licenseUrl\": \"https://github.com/HurricaneJames/react-immutable-proptypes/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"swagger-ui-dist@3.23.11\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/swagger-api/swagger-ui\",\n \"licenseUrl\": \"https://github.com/swagger-api/swagger-ui/raw/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"swagger-ui-themes@3.0.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ostranme/swagger-ui-themes\",\n\n-----------------------------------------------------------\n\n\"deepmerge@4.3.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/TehShrike/deepmerge\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/TehShrike/deepmerge/master/license.txt\",\n\n-----------------------------------------------------------\n\n\"exenv@1.2.2\"\n\n \"licenses\": \"BSD\",\n \"repository\": \"https://github.com/JedWatson/exenv\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/JedWatson/exenv/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"grommet-styles@0.2.0\"\n\n \"licenses\": \"Apache-2.0\",\n \"repository\": \"https://github.com/grommet/grommet-styles\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/grommet/grommet-styles/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"hoist-non-react-statics@3.3.2\"\n\n \"licenses\": \"BSD\",\n \"repository\": \"https://github.com/mridgway/hoist-non-react-statics\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/mridgway/hoist-non-react-statics/master/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"object-assign@4.1.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/sindresorhus/object-assign\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/sindresorhus/object-assign/main/license\",\n\n-----------------------------------------------------------\n\n\"react-fast-compare@3.2.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/FormidableLabs/react-fast-compare\",\n \"licenseUrl\": 
\"https://raw.githubusercontent.com/FormidableLabs/react-fast-compare/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-is@18.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/facebook/react/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-joyride@2.5.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/gilbarbara/react-joyride\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/gilbarbara/react-joyride/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-proptype-conditional-require@1.0.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/beefancohen/react-proptype-conditional-require\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/beefancohen/react-proptype-conditional-require/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-query@3.39.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/TanStack/query\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/TanStack/query/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-side-effect@2.1.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/gaearon/react-side-effect\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/gaearon/react-side-effect/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"scheduler@0.23.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/facebook/react\",\n \"licenseUrl\": \"https://github.com/facebook/react/raw/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"scroll@3.0.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/michaelrhodes/scroll\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/michaelrhodes/scroll/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"scrollparent@2.0.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/olahol/scrollparent.js\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/olahol/scrollparent.js/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"shallowequal@1.1.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/dashed/shallowequal\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/dashed/shallowequal/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"@mswjs/cookies@0.2.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/mswjs/cookies\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/mswjs/cookies/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"@open-draft/until@1.0.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/open-draft/until\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/open-draft/until/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"@xmldom/xmldom@0.8.7\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/xmldom/xmldom\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/xmldom/xmldom/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"available-typed-arrays@1.0.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": 
\"https://github.com/inspect-js/available-typed-arrays\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/available-typed-arrays/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"base64-js@1.5.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/beatgammit/base64-js\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/beatgammit/base64-js/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"buffer@6.0.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/feross/buffer\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/feross/buffer/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"call-bind@1.0.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ljharb/call-bind\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/ljharb/call-bind/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"cookie@0.4.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/jshttp/cookie\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/jshttp/cookie/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"debug@4.3.4\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/debug-js/debug\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/debug-js/debug/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"esprima@4.0.1\"\n\n \"licenses\": \"BSD-2-Clause\",\n \"repository\": \"https://github.com/jquery/esprima\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/jquery/esprima/main/LICENSE.BSD\",\n\n-----------------------------------------------------------\n\n\"events@3.3.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/browserify/events\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/browserify/events/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"for-each@0.3.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/Raynos/for-each\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/Raynos/for-each/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"function-bind@1.1.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/Raynos/function-bind\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/Raynos/function-bind/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"get-intrinsic@1.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ljharb/get-intrinsic\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/ljharb/get-intrinsic/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"gopd@1.0.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/ljharb/gopd\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/ljharb/gopd/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"has-symbols@1.0.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/has-symbols\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/has-symbols/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"has-tostringtag@1.0.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/has-tostringtag\",\n \"licenseUrl\": 
\"https://raw.githubusercontent.com/inspect-js/has-tostringtag/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"has@1.0.3\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/tarruda/has\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/tarruda/has/master/LICENSE-MIT\",\n\n-----------------------------------------------------------\n\n\"headers-polyfill@3.1.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/mswjs/headers-polyfill\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/mswjs/headers-polyfill/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"ieee754@1.2.1\"\n\n \"licenses\": \"BSD-3-Clause\",\n \"repository\": \"https://github.com/feross/ieee754\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/feross/ieee754/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"inherits@2.0.4\"\n\n \"licenses\": \"ISC\",\n \"repository\": \"https://github.com/isaacs/inherits\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/isaacs/inherits/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"is-arguments@1.1.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/is-arguments\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/is-arguments/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"is-callable@1.2.7\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/is-callable\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/is-callable/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"is-generator-function@1.0.10\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/is-generator-function\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/is-generator-function/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"is-node-process@1.2.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/mswjs/is-node-process\",\n\n-----------------------------------------------------------\n\n\"is-typed-array@1.1.10\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/is-typed-array\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/is-typed-array/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"js-levenshtein@1.1.6\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/gustf/js-levenshtein\",\n \"licenseUrl\": \"https://github.com/gustf/js-levenshtein/blob/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"js-yaml@3.14.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/nodeca/js-yaml\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/nodeca/js-yaml/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"ms@2.1.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/vercel/ms\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/vercel/ms/master/license.md\",\n\n-----------------------------------------------------------\n\n\"msw@1.2.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/mswjs/msw\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/mswjs/msw/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"node-fetch@2.6.9\"\n\n \"licenses\": 
\"MIT\",\n \"repository\": \"https://github.com/node-fetch/node-fetch\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/node-fetch/node-fetch/main/LICENSE.md\",\n\n-----------------------------------------------------------\n\n\"outvariant@1.4.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/open-draft/outvariant\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/open-draft/outvariant/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"path-to-regexp@6.2.1\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/pillarjs/path-to-regexp\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/pillarjs/path-to-regexp/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"set-cookie-parser@2.6.0\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/nfriedly/set-cookie-parser\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/nfriedly/set-cookie-parser/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"strict-event-emitter@0.4.6\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/open-draft/strict-event-emitter\",\n\n-----------------------------------------------------------\n\n\"util@0.12.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/browserify/node-util\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/browserify/node-util/master/LICENSE\",\n\n-----------------------------------------------------------\n\n\"web-encoding@1.1.5\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/gozala/web-encoding\",\n\n-----------------------------------------------------------\n\n\"which-typed-array@1.1.9\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/inspect-js/which-typed-array\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/inspect-js/which-typed-array/main/LICENSE\",\n\n-----------------------------------------------------------\n\n\"react-toastify@9.1.2\"\n\n \"licenses\": \"MIT\",\n \"repository\": \"https://github.com/fkhadra/react-toastify\",\n \"licenseUrl\": \"https://raw.githubusercontent.com/fkhadra/react-toastify/main/LICENSE\",\n\n-----------------------------------------------------------\ncommons-beanutils\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\ncommons-configuration\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\njoda-time\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\njna\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\ncommons-lang\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nehcache-core\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 
2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nannotations\n\nLicense: GNU Lesser General Public License\nhttp://www.gnu.org/licenses/lgpl.html\n\n-----------------------------------------------------------\n\nhazelcast\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\njersey-server\n\nLicense: CDDL+GPL License\nhttp://glassfish.java.net/public/CDDL+GPL_1_1.html\n\n-----------------------------------------------------------\n\nlibpam4j\n\nLicense: The MIT license\nhttp://www.opensource.org/licenses/mit-license.php\n\n-----------------------------------------------------------\n\nlombok\n\nLicense: The MIT License\nhttps://projectlombok.org/LICENSE\n\n-----------------------------------------------------------\n\nspring-security-core\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nspring-security-kerberos-core\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nswagger-annotations\n\nCopyright (c) 2009 The Apache Software Foundation.\n\nLicense: Apache License, Version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0.html\n\n-----------------------------------------------------------\n\nApache Ranger\n\nCopyright 2014-2022 The Apache Software Foundation\n\nLicense: Apache License Version 2.0, January 2004\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\nApache NiFi\n\nCopyright 2014-2022 The Apache Software Foundation\n\nLicense: Apache License Version 2.0, January 2004\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n-----------------------------------------------------------\n\nApache Airflow\n\nCopyright 2016-2021 The Apache Software Foundation\n\nLicense: Apache License Version 2.0, January 2004\nhttp://www.apache.org/licenses/LICENSE-2.0\n\n\n=========================================================== Apache License Apache License\n Version 2.0, January 2004\n http://www.apache.org/licenses/\n\n\n TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n\n 1. Definitions.\n\n\n \"License\" shall mean the terms and conditions for use, reproduction,\n and distribution as defined by Sections 1 through 9 of this document.\n\n\n \"Licensor\" shall mean the copyright owner or entity authorized by\n the copyright owner that is granting the License.\n\n\n \"Legal Entity\" shall mean the union of the acting entity and all\n other entities that control, are controlled by, or are under common\n control with that entity. 
For the purposes of this definition,\n \"control\" means (i) the power, direct or indirect, to cause the\n direction or management of such entity, whether by contract or\n otherwise, or (ii) ownership of fifty percent (50%) or more of the\n outstanding shares, or (iii) beneficial ownership of such entity.\n\n\n \"You\" (or \"Your\") shall mean an individual or Legal Entity\n exercising permissions granted by this License.\n\n\n \"Source\" form shall mean the preferred form for making modifications,\n including but not limited to software source code, documentation\n source, and configuration files.\n\n\n \"Object\" form shall mean any form resulting from mechanical\n transformation or translation of a Source form, including but\n not limited to compiled object code, generated documentation,\n and conversions to other media types.\n\n\n \"Work\" shall mean the work of authorship, whether in Source or\n Object form, made available under the License, as indicated by a\n copyright notice that is included in or attached to the work\n (an example is provided in the Appendix below).\n\n\n \"Derivative Works\" shall mean any work, whether in Source or Object\n form, that is based on (or derived from) the Work and for which the\n editorial revisions, annotations, elaborations, or other modifications\n represent, as a whole, an original work of authorship. For the purposes\n of this License, Derivative Works shall not include works that remain\n separable from, or merely link (or bind by name) to the interfaces of,\n the Work and Derivative Works thereof.\n\n\n \"Contribution\" shall mean any work of authorship, including\n the original version of the Work and any modifications or additions\n to that Work or Derivative Works thereof, that is intentionally\n submitted to Licensor for inclusion in the Work by the copyright owner\n or by an individual or Legal Entity authorized to submit on behalf of\n the copyright owner. For the purposes of this definition, \"submitted\"\n means any form of electronic, verbal, or written communication sent\n to the Licensor or its representatives, including but not limited to\n communication on electronic mailing lists, source code control systems,\n and issue tracking systems that are managed by, or on behalf of, the\n Licensor for the purpose of discussing and improving the Work, but\n excluding communication that is conspicuously marked or otherwise\n designated in writing by the copyright owner as \"Not a Contribution.\"\n\n\n \"Contributor\" shall mean Licensor and any individual or Legal Entity\n on behalf of whom a Contribution has been received by Licensor and\n subsequently incorporated within the Work.\n\n\n 2. Grant of Copyright License. Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n copyright license to reproduce, prepare Derivative Works of,\n publicly display, publicly perform, sublicense, and distribute the\n Work and such Derivative Works in Source or Object form.\n\n\n 3. Grant of Patent License. 
Subject to the terms and conditions of\n this License, each Contributor hereby grants to You a perpetual,\n worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n (except as stated in this section) patent license to make, have made,\n use, offer to sell, sell, import, and otherwise transfer the Work,\n where such license applies only to those patent claims licensable\n by such Contributor that are necessarily infringed by their\n Contribution(s) alone or by combination of their Contribution(s)\n with the Work to which such Contribution(s) was submitted. If You\n institute patent litigation against any entity (including a\n cross-claim or counterclaim in a lawsuit) alleging that the Work\n or a Contribution incorporated within the Work constitutes direct\n or contributory patent infringement, then any patent licenses\n granted to You under this License for that Work shall terminate\n as of the date such litigation is filed.\n\n\n 4. Redistribution. You may reproduce and distribute copies of the\n Work or Derivative Works thereof in any medium, with or without\n modifications, and in Source or Object form, provided that You\n meet the following conditions:\n\n\n (a) You must give any other recipients of the Work or\n Derivative Works a copy of this License; and\n\n\n (b) You must cause any modified files to carry prominent notices\n stating that You changed the files; and\n\n\n (c) You must retain, in the Source form of any Derivative Works\n that You distribute, all copyright, patent, trademark, and\n attribution notices from the Source form of the Work,\n excluding those notices that do not pertain to any part of\n the Derivative Works; and\n\n\n (d) If the Work includes a \"NOTICE\" text file as part of its\n distribution, then any Derivative Works that You distribute must\n include a readable copy of the attribution notices contained\n within such NOTICE file, excluding those notices that do not\n pertain to any part of the Derivative Works, in at least one\n of the following places: within a NOTICE text file distributed\n as part of the Derivative Works; within the Source form or\n documentation, if provided along with the Derivative Works; or,\n within a display generated by the Derivative Works, if and\n wherever such third-party notices normally appear. The contents\n of the NOTICE file are for informational purposes only and\n do not modify the License. You may add Your own attribution\n notices within Derivative Works that You distribute, alongside\n or as an addendum to the NOTICE text from the Work, provided\n that such additional attribution notices cannot be construed\n as modifying the License.\n\n\n You may add Your own copyright statement to Your modifications and\n may provide additional or different license terms and conditions\n for use, reproduction, or distribution of Your modifications, or\n for any such Derivative Works as a whole, provided Your use,\n reproduction, and distribution of the Work otherwise complies with\n the conditions stated in this License.\n\n\n 5. Submission of Contributions. Unless You explicitly state otherwise,\n any Contribution intentionally submitted for inclusion in the Work\n by You to the Licensor shall be under the terms and conditions of\n this License, without any additional terms or conditions.\n Notwithstanding the above, nothing herein shall supersede or modify\n the terms of any separate license agreement you may have executed\n with Licensor regarding such Contributions.\n\n\n 6. Trademarks. 
This License does not grant permission to use the trade\n names, trademarks, service marks, or product names of the Licensor,\n except as required for reasonable and customary use in describing the\n origin of the Work and reproducing the content of the NOTICE file.\n\n\n 7. Disclaimer of Warranty. Unless required by applicable law or\n agreed to in writing, Licensor provides the Work (and each\n Contributor provides its Contributions) on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n implied, including, without limitation, any warranties or conditions\n of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n PARTICULAR PURPOSE. You are solely responsible for determining the\n appropriateness of using or redistributing the Work and assume any\n risks associated with Your exercise of permissions under this License.\n\n\n 8. Limitation of Liability. In no event and under no legal theory,\n whether in tort (including negligence), contract, or otherwise,\n unless required by applicable law (such as deliberate and grossly\n negligent acts) or agreed to in writing, shall any Contributor be\n liable to You for damages, including any direct, indirect, special,\n incidental, or consequential damages of any character arising as a\n result of this License or out of the use or inability to use the\n Work (including but not limited to damages for loss of goodwill,\n work stoppage, computer failure or malfunction, or any and all\n other commercial damages or losses), even if such Contributor\n has been advised of the possibility of such damages.\n\n\n 9. Accepting Warranty or Additional Liability. While redistributing\n the Work or Derivative Works thereof, You may choose to offer,\n and charge a fee for, acceptance of support, warranty, indemnity,\n or other liability obligations and/or rights consistent with this\n License. However, in accepting such obligations, You may act only\n on Your own behalf and on Your sole responsibility, not on behalf\n of any other Contributor, and only if You agree to indemnify,\n defend, and hold each Contributor harmless for any liability\n incurred by, or claims asserted against, such Contributor by reason\n of your accepting any such warranty or additional liability.\n\n\n END OF TERMS AND CONDITIONS\n\n\n APPENDIX: How to apply the Apache License to your work.\n\n\n To apply the Apache License to your work, attach the following\n boilerplate notice, with the fields enclosed by brackets \"[]\"\n replaced with your own identifying information. (Don't include\n the brackets!) The text should be enclosed in the appropriate\n comment syntax for the file format. 
We also recommend that a\n file or class name and description of purpose be included on the\n same \"printed page\" as the copyright notice for easier\n identification within third-party archives.\n\n\n Copyright [yyyy] [name of copyright owner]\n\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n\n=========================================================== MIT License The MIT License\n\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n\n=========================================================== License for uuid License for uuid:\n\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n1. Redistributions of source code must retain the above copyright\n notice, and the entire permission notice in its entirety,\n including the disclaimer of warranties.\n2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n3. The name of the author may not be used to endorse or promote\n products derived from this software without specific prior\n written permission.\n\n\nTHIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED\nWARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\nOF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF\nWHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT\nOF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR\nBUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE\nUSE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE. 
License for JavaMail License for JavaMail:\n\n\nOracle Corporation (\"ORACLE\") ENTITLEMENT for SOFTWARE\n\n\nLicensee/Company: Entity receiving Software.\n\n\nEffective Date: Date of delivery of the Software to You.\n\n\nSoftware: JavaMail 1.4.4\n\n\nLicense Term: Perpetual (subject to termination under the SLA).\n\n\nLicensed Unit: Software Copy.\n\n\nLicensed unit Count: Unlimited.\n\n\nPermitted Uses:\n\n\n1. You may reproduce and use the Software for Your own Individual,\nCommercial and Research and Instructional Use only for the purposes of\ndesigning, developing, testing, and running Your applets and\napplications (\"Programs\").\n\n\n2. Subject to the terms and conditions of this Agreement and\nrestrictions and exceptions set forth in the Software's documentation,\nYou may reproduce and distribute portions of Software identified as a\nredistributable in the documentation (each a \"Redistributable\"),\nprovided that You comply with the following (note that You may be\nentitled to reproduce and distribute other portions of the Software not\ndefined in the documentation as a Redistributable under certain other\nlicenses as described in the THIRDPARTYLICENSEREADME, if applicable):\n\n\n(a) You distribute Redistributable complete and unmodified and only\nbundled as part of Your Programs,\n\n\n(b) Your Programs add significant and primary functionality to the\nRedistributable,\n\n\n(c) You distribute Redistributable for the sole purpose of running Your\nPrograms,\n\n\n(d) You do not distribute additional software intended to replace any\ncomponent(s) of the Redistributable,\n\n\n(e) You do not remove or alter any proprietary legends or notices\ncontained in or on the Redistributable.\n\n\n(f) You only distribute the Redistributable subject to a license\nagreement that protects Oracle's interests consistent with the terms\ncontained in this Agreement, and\n\n\n(g) You agree to defend and indemnify Oracle and its licensors from and\nagainst any damages, costs, liabilities, settlement amounts and/or\nexpenses (including attorneys' fees) incurred in connection with any\nclaim, lawsuit or action by any third party that arises or results from\nthe use or distribution of any and all Programs and/or\nRedistributable.\n\n\n3. Java Technology Restrictions. You may not create, modify, or change\nthe behavior of, or authorize Your licensees to create, modify, or\nchange the behavior of, classes, interfaces, or subpackages that are in\nany way identified as \"java\", \"javax\", \"sun\" or similar convention as\nspecified by Oracle in any naming convention designation.\n\n\n4. No Diagnostic, Maintenance, Repair or Technical Support Services.\nThe scope of Your license does not include any right, express or\nimplied, (i) to access, copy, distribute, display or use the Software\nto provide diagnostic, maintenance, repair or technical support\nservices for Oracle software or Oracle hardware on behalf of any third party\nfor Your direct or indirect commercial gain or advantage, without Oracle's\nprior written authorization, or (ii) for any third party to access,\ncopy, distribute, display or use the Software to provide diagnostic,\nmaintenance, repair or technical support services for Oracle software or\nOracle hardware on Your behalf for such party's direct or indirect\ncommercial gain or advantage, without Oracle's prior written\nauthorization. 
The limitations set forth in this paragraph apply to any\nand all error corrections, patches, updates, and upgrades to the\nSoftware You may receive, access, download or otherwise obtain from\nOracle.\n\n\n5. Records and Documentation. During the term of the SLA and\nEntitlement, and for a period of three (3) years thereafter, You agree\nto keep proper records and documentation of Your compliance with the\nSLA and Entitlement. Upon Oracle's reasonable request, You will provide\ncopies of such records and documentation to Oracle for the purpose of\nconfirming Your compliance with the terms and conditions of the SLA and\nEntitlement. This section will survive any termination of the SLA and\nEntitlement. You may terminate this SLA and Entitlement at any time by\ndestroying all copies of the Software in which case the obligations set\nforth in Section 7 of the SLA shall apply.\n\n\n\n\nOracle Corporation (\"ORACLE\")\nSOFTWARE LICENSE AGREEMENT\n\n\nREAD THE TERMS OF THIS AGREEMENT (\"AGREEMENT\") CAREFULLY BEFORE OPENING\nSOFTWARE MEDIA PACKAGE. BY OPENING SOFTWARE MEDIA PACKAGE, YOU AGREE TO\nTHE TERMS OF THIS AGREEMENT. IF YOU ARE ACCESSING SOFTWARE\nELECTRONICALLY, INDICATE YOUR ACCEPTANCE OF THESE TERMS BY SELECTING\nTHE \"ACCEPT\" BUTTON AT THE END OF THIS AGREEMENT. IF YOU DO NOT AGREE\nTO ALL OF THE TERMS, PROMPTLY RETURN THE UNUSED SOFTWARE TO YOUR PLACE\nOF PURCHASE FOR A REFUND OR, IF SOFTWARE IS ACCESSED ELECTRONICALLY,\nSELECT THE \"DECLINE\" (OR \"EXIT\") BUTTON AT THE END OF THIS AGREEMENT.\nIF YOU HAVE SEPARATELY AGREED TO LICENSE TERMS (\"MASTER TERMS\") FOR\nYOUR LICENSE TO THIS SOFTWARE, THEN SECTIONS 1-6 OF THIS AGREEMENT\n(\"SUPPLEMENTAL LICENSE TERMS\") SHALL SUPPLEMENT AND SUPERSEDE THE\nMASTER TERMS IN RELATION TO THIS SOFTWARE.\n\n\n1. Definitions.\n\n\n(a) \"Entitlement\" means the collective set of applicable documents\nauthorized by Oracle evidencing your obligation to pay associated fees (if\nany) for the license, associated Services, and the authorized scope of\nuse of Software under this Agreement.\n\n\n(b) \"Licensed Unit\" means the unit of measure by which your use of\nSoftware and/or Service is licensed, as described in your Entitlement.\n\n\n(c) \"Permitted Use\" means the licensed Software use(s) authorized\nin this Agreement as specified in your Entitlement. The Permitted Use\nfor any bundled Oracle software not specified in your Entitlement will be\nevaluation use as provided in Section 3.\n\n\n(d) \"Service\" means the service(s) that Oracle or its delegate will\nprovide, if any, as selected in your Entitlement and as further\ndescribed in the applicable service listings at\nwww.sun.com/service/servicelist.\n\n\n(e) \"Software\" means the Oracle software described in your\nEntitlement. Also, certain software may be included for evaluation use\nunder Section 3.\n\n\n(f) \"You\" and \"Your\" means the individual or legal entity specified\nin the Entitlement, or for evaluation purposes, the entity performing\nthe evaluation.\n\n\n2. License Grant and Entitlement.\n\n\nSubject to the terms of your Entitlement, Oracle grants you a\nnonexclusive, nontransferable limited license to use Software for its\nPermitted Use for the license term. 
Your Entitlement will specify (a)\nSoftware licensed, (b) the Permitted Use, (c) the license term, and (d)\nthe Licensed Units.\n\n\nAdditionally, if your Entitlement includes Services, then it will also\nspecify the (e) Service and (f) service term.\n\n\nIf your rights to Software or Services are limited in duration and the\ndate such rights begin is other than the purchase date, your\nEntitlement will provide that beginning date(s).\n\n\nThe Entitlement may be delivered to you in various ways depending on\nthe manner in which you obtain Software and Services, for example, the\nEntitlement may be provided in your receipt, invoice or your contract\nwith Oracle or authorized Oracle reseller. It may also be in electronic\nformat if you download Software.\n\n\n3. Permitted Use.\n\n\nAs selected in your Entitlement, one or more of the following Permitted\nUses will apply to your use of Software. Unless you have an Entitlement\nthat expressly permits it, you may not use Software for any of the\nother Permitted Uses. If you don't have an Entitlement, or if your\nEntitlement doesn't cover additional software delivered to you, then\nsuch software is for your Evaluation Use.\n\n\n(a) Evaluation Use. You may evaluate Software internally for a period\nof 90 days from your first use.\n\n\n(b) Research and Instructional Use. You may use Software internally to\ndesign, develop and test, and also to provide instruction on such\nuses.\n\n\n(c) Individual Use. You may use Software internally for personal,\nindividual use.\n\n\n(d) Commercial Use. You may use Software internally for your own\ncommercial purposes.\n\n\n(e) Service Provider Use. You may make Software functionality\naccessible (but not by providing Software itself or through outsourcing\nservices) to your end users in an extranet deployment, but not to your\naffiliated companies or to government agencies.\n\n\n4. Licensed Units.\n\n\nYour Permitted Use is limited to the number of Licensed Units stated in\nyour Entitlement. If you require additional Licensed Units, you will\nneed additional Entitlement(s).\n\n\n5. Restrictions.\n\n\n(a) The copies of Software provided to you under this Agreement are\nlicensed, not sold, to you by Oracle. Oracle reserves all rights not\nexpressly granted. (b) You may make a single archival copy of Software,\nbut otherwise may not copy, modify, or distribute Software. However if\nthe Oracle documentation accompanying Software lists specific portions of\nSoftware, such as header files, class libraries, reference source code,\nand/or redistributable files, that may be handled differently, you may\ndo so only as provided in the Oracle documentation. (c) You may not rent,\nlease, lend or encumber Software. (d) Unless enforcement is prohibited\nby applicable law, you may not decompile, or reverse engineer Software.\n(e) The terms and conditions of this Agreement will apply to any\nSoftware updates, provided to you at Oracle's discretion, that replace\nand/or supplement the original Software, unless such update contains a\nseparate license. (f) You may not publish or provide the results of any\nbenchmark or comparison tests run on Software to any third party\nwithout the prior written consent of Oracle. (g) Software is confidential\nand copyrighted. 
(h) Unless otherwise specified, if Software is\ndelivered with embedded or bundled software that enables functionality\nof Software, you may not use such software on a stand-alone basis or\nuse any portion of such software to interoperate with any program(s)\nother than Software. (i) Software may contain programs that perform\nautomated collection of system data and/or automated software updating\nservices. System data collected through such programs may be used by\nOracle, its subcontractors, and its service delivery partners for the\npurpose of providing you with remote system services and/or improving\nOracle's software and systems. (j) Software is not designed, licensed or\nintended for use in the design, construction, operation or maintenance\nof any nuclear facility and Oracle and its licensors disclaim any express\nor implied warranty of fitness for such uses. (k) No right, title or\ninterest in or to any trademark, service mark, logo or trade name of\nOracle or its licensors is granted under this Agreement.\n\n\n6. Java Compatibility and Open Source.\n\n\nSoftware may contain Java technology. You may not create additional\nclasses to, or modifications of, the Java technology, except under\ncompatibility requirements available under a separate agreement\navailable at www.java.net.\n\n\nOracle supports and benefits from the global community of open source\ndevelopers, and thanks the community for its important contributions\nand open standards-based technology, which Oracle has adopted into many of\nits products.\n\n\nPlease note that portions of Software may be provided with notices and\nopen source licenses from such communities and third parties that\ngovern the use of those portions, and any licenses granted hereunder do\nnot alter any rights and obligations you may have under such open\nsource licenses, however, the disclaimer of warranty and limitation of\nliability provisions in this Agreement will apply to all Software in\nthis distribution.\n\n\n7. Term and Termination.\n\n\nThe license and service term are set forth in your Entitlement(s). Your\nrights under this Agreement will terminate immediately without notice\nfrom Oracle if you materially breach it or take any action in derogation\nof Oracle's and/or its licensors' rights to Software. Oracle may terminate\nthis Agreement should any Software become, or in Oracle's reasonable\nopinion likely to become, the subject of a claim of intellectual\nproperty infringement or trade secret misappropriation. Upon\ntermination, you will cease use of, and destroy, Software and confirm\ncompliance in writing to Oracle. Sections 1, 5, 6, 7, and 9-15 will\nsurvive termination of the Agreement.\n\n\n8. Limited Warranty.\n\n\nOracle warrants to you that for a period of 90 days from the date of\npurchase, as evidenced by a copy of the receipt, the media on which\nSoftware is furnished (if any) will be free of defects in materials and\nworkmanship under normal use. Except for the foregoing, Software is\nprovided \"AS IS\". Your exclusive remedy and Oracle's entire liability\nunder this limited warranty will be at Oracle's option to replace Software\nmedia or refund the fee paid for Software. Some states do not allow\nlimitations on certain implied warranties, so the above may not apply\nto you. This limited warranty gives you specific legal rights. You may\nhave others, which vary from state to state.\n\n\n9. 
Disclaimer of Warranty.\n\n\nUNLESS SPECIFIED IN THIS AGREEMENT, ALL EXPRESS OR IMPLIED CONDITIONS,\nREPRESENTATIONS AND WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT\nARE DISCLAIMED, EXCEPT TO THE EXTENT THAT THESE DISCLAIMERS ARE HELD TO\nBE LEGALLY INVALID.\n\n\n10. Limitation of Liability.\n\n\nTO THE EXTENT NOT PROHIBITED BY LAW, IN NO EVENT WILL ORACLE OR ITS\nLICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR\nSPECIAL, INDIRECT, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES,\nHOWEVER CAUSED REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF OR\nRELATED TO THE USE OF OR INABILITY TO USE SOFTWARE, EVEN IF ORACLE HAS\nBEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. In no event will Oracle's\nliability to you, whether in contract, tort (including negligence), or\notherwise, exceed the amount paid by you for Software under this\nAgreement. The foregoing limitations will apply even if the above\nstated warranty fails of its essential purpose. Some states do not\nallow the exclusion of incidental or consequential damages, so some of\nthe terms above may not be applicable to you.\n\n\n11. Export Regulations.\n\n\nAll Software, documents, technical data, and any other materials\ndelivered under this Agreement are subject to U.S. export control laws\nand may be subject to export or import regulations in other countries.\nYou agree to comply strictly with these laws and regulations and\nacknowledge that you have the responsibility to obtain any licenses to\nexport, re-export, or import as may be required after delivery to you.\n\n\n12. U.S. Government Restricted Rights.\n\n\nIf Software is being acquired by or on behalf of the U.S. Government or\nby a U.S. Government prime contractor or subcontractor (at any tier),\nthen the Government's rights in Software and accompanying documentation\nwill be only as set forth in this Agreement; this is in accordance with\n48 CFR 227.7201 through 227.7202-4 (for Department of Defense (DOD)\nacquisitions) and with 48 CFR 2.101 and 12.212 (for non-DOD\nacquisitions).\n\n\n13. Governing Law.\n\n\nAny action related to this Agreement will be governed by California law\nand controlling U.S. federal law. No choice of law rules of any\njurisdiction will apply.\n\n\n14. Severability.\n\n\nIf any provision of this Agreement is held to be unenforceable, this\nAgreement will remain in effect with the provision omitted, unless\nomission would frustrate the intent of the parties, in which case this\nAgreement will immediately terminate.\n\n\n15. Integration.\n\n\nThis Agreement, including any terms contained in your Entitlement, is\nthe entire agreement between you and Oracle relating to its subject\nmatter. It supersedes all prior or contemporaneous oral or written\ncommunications, proposals, representations and warranties and prevails\nover any conflicting or additional terms of any quote, order,\nacknowledgment, or other communication between the parties relating to\nits subject matter during the term of this Agreement. No modification\nof this Agreement will be binding, unless in writing and signed by an\nauthorized representative of each party.\n\n\nFor inquiries please contact: Oracle Corporation, 500 Oracle Parkway,\nRedwood Shores, California 94065, USA. 
ZLIB License ZLIB license\n\n\nzlib.h -- interface of the 'zlib' general purpose compression library\nversion 1.2.7, May 2nd, 2012\n\n\nCopyright (C) 1995-2012 Jean-loup Gailly and Mark Adler\n\n\nThis software is provided 'as-is', without any express or implied\nwarranty. In no event will the authors be held liable for any damages\narising from the use of this software.\n\n\nPermission is granted to anyone to use this software for any purpose,\nincluding commercial applications, and to alter it and redistribute it\nfreely, subject to the following restrictions:\n\n\n1. The origin of this software must not be misrepresented; you must not\n claim that you wrote the original software. If you use this software\n in a product, an acknowledgment in the product documentation would be\n appreciated but is not required.\n2. Altered source versions must be plainly marked as such, and must not be\n misrepresented as being the original software.\n3. This notice may not be removed or altered from any source distribution.\n\n\nJean-loup Gailly Mark Adler\njloup@gzip.org madler@alumni.caltech.edu D3.js license (New BSD License) D3.js license (New BSD License)\n\n\nCopyright (c) 2012, Michael Bostock\nAll rights reserved.\n\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n\n* The name Michael Bostock may not be used to endorse or promote products\n derived from this software without specific prior written permission.\n\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL MICHAEL BOSTOCK BE LIABLE FOR ANY DIRECT,\nINDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\nBUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\nOF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\nEVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Lesser GNU Public License (LGPL) GNU LESSER GENERAL PUBLIC LICENSE\n Version 3, 29 June 2007\n\n\n Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n\n\n\n This version of the GNU Lesser General Public License incorporates\nthe terms and conditions of version 3 of the GNU General Public\nLicense, supplemented by the additional permissions listed below.\n\n\n 0. 
Additional Definitions.

As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License.

"The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below.

An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library.

A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version".

The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version.

The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work.

1. Exception to Section 3 of the GNU GPL.

You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL.

2. Conveying Modified Versions.

If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version:

a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or

b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy.

3. Object Code Incorporating Material from Library Header Files.

The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following:

a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License.

b) Accompany the object code with a copy of the GNU GPL and this license document.

4. Combined Works.

You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following:

a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License.

b) Accompany the Combined Work with a copy of the GNU GPL and this license document.

c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document.

d) Do one of the following:

0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.

1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version.

e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.)

5. Combined Libraries.

You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following:

a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License.

b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work.

6. Revised Versions of the GNU Lesser General Public License.

The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation.

If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library.

Lesser GNU Public License (LGPL) v2.1

GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999

Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

[This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.]

Preamble

The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users.

This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below.

When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things.

To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it.

For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights.

We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library.

To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others.

Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license.

Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs.

When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library.

We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances.

For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License.

In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system.

Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library.

The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run.

GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you".

A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables.

The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".)

"Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library.

Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does.

1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library.

You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee.

2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions:

a) The modified work must itself be a software library.

b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change.

c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License.

d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful.

(For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.)

These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library.

In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License.

3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices.

Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy.

This option is useful when you wish to copy part of the code of the Library into a program that is not a library.

4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange.

If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code.

5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License.

However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables.

When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law.

If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.)

Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself.

6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications.

You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things:

a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.)

b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with.

c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution.

d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place.

e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy.

For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable.

It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute.

7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things:

a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above.

b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work.

8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance.

9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it.

10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License.

11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library.

If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances.

It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice.

This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License.

12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License.

13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation.

14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally.

NO WARRANTY

15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Libraries

If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License).

To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

Copyright (C)

This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version.

This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

Also add information on how to contact you by electronic and paper mail.

You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names:

Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker.

, 1 April 1990
Ty Coon, President of Vice

That's all there is to it!

Boost Software License - Version 1.0 - August 17th, 2003

Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following:

The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The GNU General Public License is a free, copyleft license for software and other kinds of works.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.

To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.

For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.

Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.

For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.

Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.

Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.

The precise terms and conditions for copying, distribution and modification follow.

TERMS AND CONDITIONS

0. Definitions.

"This License" refers to version 3 of the GNU General Public License.

"Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work.

A "covered work" means either the unmodified Program or a work based on the Program.

To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.

To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.

An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.

1. Source Code.

The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work.

A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.

The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.

The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.

The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.

The Corresponding Source for a work in source code form is that same work.

2. Basic Permissions.

All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law.

No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.

When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.

4. Conveying Verbatim Copies.

You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions.

You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:

a) The work must carry prominent notices stating that you modified it, and giving a relevant date.

b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices".

c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.

d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.

A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.

6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:

a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.

b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.

c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.

d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.

e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.

A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.

"Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.

If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).

The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.

7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:

a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or

b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or

c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or

d) Limiting the use for publicity purposes of names of licensors or authors of the material; or

e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or

f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.

All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).

However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.

Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.

9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.

If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.

A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.

13. Use with the GNU Affero General Public License.

Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.

If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program.
It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n\n <one line to give the program's name and a brief idea of what it does.>\n Copyright (C) <year> <name of author>\n\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <https://www.gnu.org/licenses/>.\n\n\nAlso add information on how to contact you by electronic and paper mail.\n\n\n If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n\n <program> Copyright (C) <year> <name of author>\n This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n This is free software, and you are welcome to redistribute it\n under certain conditions; type `show c' for details.\n\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License. Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n\n You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<https://www.gnu.org/licenses/>.\n\n\n The GNU General Public License does not permit incorporating your program\ninto proprietary programs. If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library. If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License. But first, please read\n<https://www.gnu.org/licenses/why-not-lgpl.html>.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/open_source_projects_760.html", + "title": "Open-Source Software Acknowledgements (Release 7.6.0)" + }, + { + "content": "\nOther Resources Provides links to additional resources such as on-demand training, videos, blogs, and the HPE Ezmeral Data Fabric community.
In addition to the product documentation, you may be interested in the following resources: Training https://learn.software.hpe.com/ Blogs and Videos https://community.hpe.com/t5/hpe-ezmeral-uncut/bg-p/software#.XzXV2-hKg2w HPE Ezmeral Software Community HPE Developer Community https://developer.hpe.com/ Slack Community for Developers https://slack.hpedev.io/ HPE Support Center https://support.hpe.com/ Contact HPE https://www.hpe.com/us/en/contact-hpe.html Videos, Reports, and Case Studies https://www.hpe.com/us/en/resource-library.html HPE GreenLake Marketplace https://www.hpe.com/us/en/software/marketplace.html/platform/ezmeraldata", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/more_resources.html", + "title": "Other Resources" + }, + { + "content": "\nContact HPE Provides a link to contact HPE Sales or Support.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/reference/contact_hpe.html", + "title": "Contact HPE" + }, + { + "content": "\nGlossary Definitions for commonly used terms in MapR Converged Data Platform environments.
Glossary List of terms (with description) used in HPE Ezmeral Data Fabric documentation. .snapshot A special directory in the top level of each volume that contains all the snapshots created or preserved for the volume. access control expression (ACE) A Boolean expression that defines a combination of users, groups, or roles that have access to an object stored natively such as a directory, file, or HPE Ezmeral Data Fabric Database table. access control list (ACL) A list of permissions attached to an object. An ACL specifies users or system processes that can perform specific actions on an object. access policy An ACL or policy in JSON format that describes user access. Grants accounts and IAM users permissions to perform resource operations, such as putting objects in a bucket. You associate access policies with accounts, users, buckets, and objects. administrator A user or users with special privileges to administer the cluster or cluster resources. Administrative functions can include managing hardware resources, users, data, services, security, and availability. advisory quota An advisory disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the advisory quota, an alert is sent. air gap Physical isolation between a computer system and unsecured networks. To enhance security, air-gapped computer systems are disconnected from other systems and networks. chunk Files in the file system are split into chunks (similar to Hadoop blocks) that are normally 256 MB by default. Any multiple of 65,536 bytes is a valid chunk size, but tuning the size correctly is important. Files inherit the chunk size settings of the directory that contains them, as do subdirectories on which chunk size has not been explicitly set. Any files written by a Hadoop application, whether via the file APIs or over NFS, use chunk size specified by the settings for the directory where the file is written. client node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as an \"edge node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. cluster admin The data-fabric user. compute node A compute node is used to process data using a compute engine (for example, YARN, Hive, Spark, or Drill). A compute node is by definition a data-fabric cluster node. container The unit of shared storage in a data-fabric cluster. Every container is either a name container or a data container.
container location database (CLDB) A service, running on one or more data-fabric nodes, that maintains the locations of services, containers, and other cluster information. core The minimum complement of software packages required to construct a data-fabric cluster. These packages include mapr-core , mapr-core-internal , mapr-cldb , mapr-apiserver , mapr-fileserver , mapr-zookeeper , and others. Note that ecosystem components are not part of core. data-access gateway A service that acts as a proxy and gateway for translating requests between lightweight client applications and the data-fabric cluster. data compaction A process that enables users to remove empty or deleted space in the database and to compact the database to occupy contiguous space. data container One of the two types of containers in a data-fabric cluster. Data containers typically have a cascaded configuration (master replicates to replica1, replica1 replicates to replica2, and so on). Every data container is either a master container, an intermediate container, or a tail container depending on its replication role. data fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. data-fabric administrator The \" data-fabric user.\" The user that cluster services run as (typically named mapr or hadoop ) on each node. data-fabric gateway A gateway that supports table and stream replication. The data-fabric gateway mediates one-way communication between a source data-fabric cluster and a destination cluster. The data-fabric gateway also applies updates from JSON tables to their secondary indexes and propagates Change Data Capture (CDC) logs. data-fabric user The user that cluster services run as (typically named mapr or hadoop ) on each node. The data-fabric user, also known as the \" data-fabric admin,\" has full privileges to administer the cluster. The administrative privilege, with varying levels of control, can be assigned to other users as well. data node A data node has the function of storing data and always runs FileServer. A data node is by definition a data-fabric cluster node. desired replication factor The number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. developer preview A label for a feature or collection of features that have usage restrictions. Developer previews are not tested for production environments, and should be used with caution. Docker containers The application containers used by Docker software. Docker is a leading proponent of OS virtualization using application containers (\"containerization\"). Domain Relates to Object Store. A domain is a management entity for accounts and users. The number of users, the amount of disk space, number of buckets in each of the accounts, total number of accounts, and the number of disabled accounts are all tracked within a domain. Currently, Object Store only supports the primary domain; you cannot create additional domains. Administrators can create multiple accounts in the primary domain. domain user Relates to Object Store. A domain user is a cluster security principal authenticated through AD/LDAP. Domain users only exist in the default account. Domain users can log in to the Object Store UI with their domain username and password\u200b. 
Ecosystem Pack (EEP) A selected set of stable, interoperable, and widely used components from the Hadoop ecosystem that are fully supported on the Data Fabric platform. edge cluster A small-footprint edition of the HPE Ezmeral Data Fabric designed to capture, process, and analyze IoT data close to the source of the data. edge node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as a \"client node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. filelet A filelet, also called an fid, is a 256MB shard of a file. A 1 GB file for instance is comprised of the following filelets: 64K (primary fid)+(256MB-64KB)+256MB+256MB+256MB. file system The NFS-mountable, distributed, high-performance HPE Ezmeral Data Fabric data-storage system. gateway node A node on which a mapr-gateway is installed. A gateway node is by definition a data-fabric cluster node. global namespace (GNS) The data plane that connects HPE Ezmeral Data Fabric deployments. The global namespace is a mechanism that aggregates disparate and remote data sources and provides a namespace that encompasses all of your infrastructure and deployments. Global namespace technology lets you manage globally deployed data as a single resource. Because of the global namespace, you can view and run multiple fabrics as a single, logical, and local fabric. The global namespace is designed to span multiple edge nodes, on-prem data centers, and clouds. heartbeat A signal sent by each FileServer and NFS node every second to provide information to the CLDB about the node's health and resource usage. IAM users Relates to Object Store. An IAM (Identity and Access Management) user represents an actual user or an application. An administrator creates IAM users in an Object Store account and assigns access policies to them to control user and application access to resources in the account. Installer A program that simplifies installation of the HPE Ezmeral Data Fabric. The Installer guides you through the process of installing a cluster with data-fabric services and ecosystem components. You can also use the Installer to update a previously installed cluster with additional nodes, services, and ecosystem components. And you can use the Installer to upgrade a cluster to a newer core version if the cluster was installed using the Installer or an Installer Stanza. log compaction A process that purges messages previously published to a topic partition, retaining the latest version. MAST Gateway A gateway that serves as a centralized entry point for all the operations that need to be performed on tiered storage. minimum replication factor The minimum number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. When the replication factor falls below this minimum, re-replication occurs as aggressively as possible to restore the replication level. If any containers in the CLDB volume fall below the minimum replication factor, writes are disabled until aggressive re-replication restores the minimum level of replication. mirror A replica of a volume. MOSS MOSS is the acronym for Multithreaded Object Store Server. 
name container A container in a data-fabric cluster that holds a volume's namespace information and file chunk locations, and the first 64 KB of each file in the volume. Network File System (NFS) A protocol that allows a user on a client computer to access files over a network as though they were stored locally. node An individual server (physical or virtual machine) in a cluster. NodeManager (NM) A data service that works with the ResourceManager to host the YARN resource containers that run on each data node. object File and metadata that describes the file. You upload an object into a bucket. You can then download, open, move, or delete the object. Object Store Object and metadata storage solution built into the HPE Ezmeral Data Fabric. Object Store efficiently stores data for fast access and leverages the capabilities of the patented HPE Ezmeral Data Fabric file system for performance, reliability, and scalability. policy server The service that manages security policies and composite IDs. quota A disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the quota, no more data can be written. replication factor The number of copies of a volume. replication role The replication role of a container determines how that container is replicated to other storage pools in the cluster. replication role balancer The replication role balancer is a tool that switches the replication roles of containers to ensure that every node has an equal share of master and replica containers (for name containers) and an equal share of master, intermediate, and tail containers (for data containers). re-replication Re-replication occurs whenever the number of available replica containers drops below the number prescribed by that volume's replication factor. Re-replication may occur for a variety of reasons including replica container corruption, node unavailability, hard disk failure, or an increase in replication factor. ResourceManager (RM) A YARN service that manages cluster resources and schedules applications. role The service that the node runs in a cluster. You can use a node for one, or a combination of the following roles: CLDB, JobTracker, WebServer, ResourceManager, Zookeeper, FileServer, TaskTracker, NFS, and HBase. secret A Kubernetes object that holds sensitive information, such as passwords, tokens, and keys. Pods that require this sensitive information reference the secret in their pod definition. Secrets are the method Kubernetes uses to move sensitive data into pods. secure by default The HPE Ezmeral Data Fabric platform and supported ecosystem components are designed to implement security unless the user takes specific steps to turn off security options. schedule A group of rules that specify recurring points in time at which certain actions are determined to occur.
ticket In the data-fabric platform, a file that contains keys used to authenticate users and cluster servers. Tickets are created using the maprlogin or configure.sh utilities and are encrypted to protect their contents. Different types of tickets are provided for users and services. For example, every user who wants to access a cluster must have a user ticket, and every node in a cluster must have a server ticket. volume A tree of files and directories grouped for the purpose of applying a policy or set of policies to all of them at once. Warden A data-fabric process that coordinates the starting and stopping of configured services on a node. YARN resource containers A unit of memory allocated for use by YARN to process each map or reduce task. ZooKeeper A coordination service for distributed applications. It provides a shared hierarchical namespace that is organized like a standard file system.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/glossary.html", + "title": "Glossary" + }, + { + "content": "\n.snapshot A special directory in the top level of each volume that contains all the snapshots created or preserved for the volume.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_.snapshot.html", + "title": ".snapshot" + }, + { + "content": "\naccess control expression (ACE) A Boolean expression that defines a combination of users, groups, or roles that have access to an object stored natively such as a directory, file, or HPE Ezmeral Data Fabric Database table. access control list (ACL) A list of permissions attached to an object. An ACL specifies users or system processes that can perform specific actions on an object. access policy An ACL or policy in JSON format that describes user access. Grants accounts and IAM users permissions to perform resource operations, such as putting objects in a bucket. You associate access policies with accounts, users, buckets, and objects. administrator A user or users with special privileges to administer the cluster or cluster resources. Administrative functions can include managing hardware resources, users, data, services, security, and availability.
advisory quota An advisory disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the advisory quota, an alert is sent. air gap Physical isolation between a computer system and unsecured networks. To enhance security, air-gapped computer systems are disconnected from other systems and networks. chunk Files in the file system are split into chunks (similar to Hadoop blocks) that are normally 256 MB by default. Any multiple of 65,536 bytes is a valid chunk size, but tuning the size correctly is important. Files inherit the chunk size settings of the directory that contains them, as do subdirectories on which chunk size has not been explicitly set. Any files written by a Hadoop application, whether via the file APIs or over NFS, use chunk size specified by the settings for the directory where the file is written. client node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as an \"edge node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. cluster admin The data-fabric user . compute node A compute node is used to process data using a compute engine (for example, YARN, Hive, Spark, or Drill). A compute node is by definition a data-fabric cluster node. container The unit of shared storage in a data-fabric cluster. Every container is either a name container or a data container. container location database (CLDB) A service, running on one or more data-fabric nodes, that maintains the locations of services, containers, and other cluster information. core The minimum complement of software packages required to construct a data-fabric cluster. These packages include mapr-core , mapr-core-internal , mapr-cldb , mapr-apiserver , mapr-fileserver , mapr-zookeeper , and others. Note that ecosystem components are not part of core. data-access gateway A service that acts as a proxy and gateway for translating requests between lightweight client applications and the data-fabric cluster. data compaction A process that enables users to remove empty or deleted space in the database and to compact the database to occupy contiguous space. data container One of the two types of containers in a data-fabric cluster. Data containers typically have a cascaded configuration (master replicates to replica1, replica1 replicates to replica2, and so on). Every data container is either a master container, an intermediate container, or a tail container depending on its replication role. data fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. data-fabric administrator The \" data-fabric user.\" The user that cluster services run as (typically named mapr or hadoop ) on each node. data-fabric gateway A gateway that supports table and stream replication. The data-fabric gateway mediates one-way communication between a source data-fabric cluster and a destination cluster. The data-fabric gateway also applies updates from JSON tables to their secondary indexes and propagates Change Data Capture (CDC) logs. data-fabric user The user that cluster services run as (typically named mapr or hadoop ) on each node. The data-fabric user, also known as the \" data-fabric admin,\" has full privileges to administer the cluster. 
The administrative privilege, with varying levels of control, can be assigned to other users as well. data node A data node has the function of storing data and always runs FileServer. A data node is by definition a data-fabric cluster node. desired replication factor The number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. developer preview A label for a feature or collection of features that have usage restrictions. Developer previews are not tested for production environments, and should be used with caution. Docker containers The application containers used by Docker software. Docker is a leading proponent of OS virtualization using application containers (\"containerization\"). Domain Relates to Object Store. A domain is a management entity for accounts and users. The number of users, the amount of disk space, number of buckets in each of the accounts, total number of accounts, and the number of disabled accounts are all tracked within a domain. Currently, Object Store only supports the primary domain; you cannot create additional domains. Administrators can create multiple accounts in the primary domain. domain user Relates to Object Store. A domain user is a cluster security principal authenticated through AD/LDAP. Domain users only exist in the default account. Domain users can log in to the Object Store UI with their domain username and password\u200b. Ecosystem Pack (EEP) A selected set of stable, interoperable, and widely used components from the Hadoop ecosystem that are fully supported on the Data Fabric platform. edge cluster A small-footprint edition of the HPE Ezmeral Data Fabric designed to capture, process, and analyze IoT data close to the source of the data. edge node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as a \"client node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. filelet A filelet, also called an fid, is a 256MB shard of a file. A 1 GB file for instance is comprised of the following filelets: 64K (primary fid)+(256MB-64KB)+256MB+256MB+256MB. file system The NFS-mountable, distributed, high-performance HPE Ezmeral Data Fabric data-storage system. gateway node A node on which a mapr-gateway is installed. A gateway node is by definition a data-fabric cluster node. global namespace (GNS) The data plane that connects HPE Ezmeral Data Fabric deployments. The global namespace is a mechanism that aggregates disparate and remote data sources and provides a namespace that encompasses all of your infrastructure and deployments. Global namespace technology lets you manage globally deployed data as a single resource. Because of the global namespace, you can view and run multiple fabrics as a single, logical, and local fabric. The global namespace is designed to span multiple edge nodes, on-prem data centers, and clouds. heartbeat A signal sent by each FileServer and NFS node every second to provide information to the CLDB about the node's health and resource usage. IAM users Relates to Object Store. An IAM (Identity and Access Management) user represents an actual user or an application. 
An administrator creates IAM users in an Object Store account and assigns access policies to them to control user and application access to resources in the account. Installer A program that simplifies installation of the HPE Ezmeral Data Fabric. The Installer guides you through the process of installing a cluster with data-fabric services and ecosystem components. You can also use the Installer to update a previously installed cluster with additional nodes, services, and ecosystem components. And you can use the Installer to upgrade a cluster to a newer core version if the cluster was installed using the Installer or an Installer Stanza. log compaction A process that purges messages previously published to a topic partition, retaining the latest version. MAST Gateway A gateway that serves as a centralized entry point for all the operations that need to be performed on tiered storage. minimum replication factor The minimum number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. When the replication factor falls below this minimum, re-replication occurs as aggressively as possible to restore the replication level. If any containers in the CLDB volume fall below the minimum replication factor, writes are disabled until aggressive re-replication restores the minimum level of replication. mirror A replica of a volume. MOSS MOSS is the acronym for Multithreaded Object Store Server. name container A container in a data-fabric cluster that holds a volume's namespace information and file chunk locations, and the first 64 KB of each file in the volume. Network File System (NFS) A protocol that allows a user on a client computer to access files over a network as though they were stored locally. node An individual server (physical or virtual machine) in a cluster. NodeManager (NM) A data service that works with the ResourceManager to host the YARN resource containers that run on each data node. object File and metadata that describes the file. You upload an object into a bucket. You can then download, open, move, or delete the object. Object Store Object and metadata storage solution built into the HPE Ezmeral Data Fabric . Object Store efficiently stores data for fast access and leverages the capabilities of the patented HPE Ezmeral Data Fabric file system for performance, reliability, and scalability. policy server The service that manages security policies and composite IDs. quota A disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the quota, no more data can be written. replication factor The number of copies of a volume. replication role The replication role of a container determines how that container is replicated to other storage pools in the cluster. replication role balancer The replication role balancer is a tool that switches the replication roles of containers to ensure that every node has an equal share of of master and replica containers (for name containers) and an equal share of master, intermediate, and tail containers (for data containers). re-replication Re-replication occurs whenever the number of available replica containers drops below the number prescribed by that volume's replication factor. Re-replication may occur for a variety of reasons including replica container corruption, node unavailability, hard disk failure, or an increase in replication factor. ResourceManager (RM) A YARN service that manages cluster resources and schedules applications. 
role The service that the node runs in a cluster. You can use a node for one, or a combination of the following roles: CLDB, JobTracker, WebServer, ResourceManager, Zookeeper, FileServer, TaskTracker, NFS, and HBase. secret A Kubernetes object that holds sensitive information, such as passwords, tokens, and keys. Pods that require this sensitive information reference the secret in their pod definition. Secrets are the method Kubernetes uses to move sensitive data into pods. secure by default The HPE Ezmeral Data Fabric platform and supported ecosystem components are designed to implement security unless the user takes specific steps to turn off security options. schedule A group of rules that specify recurring points in time at which certain actions are determined to occur. snapshot A read-only logical image of a volume at a specific point in time. storage pool A unit of storage made up of one or more disks. By default, data-fabric storage pools contain two or three disks. For high-volume reads and writes, you can create larger storage pools when initially formatting storage during cluster creation. stripe width The number of disks in a storage pool. super group The group that has administrative access to the data-fabric cluster. super user The user that has administrative access to the data-fabric cluster. tagging Operation of applying a security policy to a resource. ticket In the data-fabric platform, a file that contains keys used to authenticate users and cluster servers. Tickets are created using the maprlogin or configure.sh utilities and are encrypted to protect their contents. Different types of tickets are provided for users and services. For example, every user who wants to access a cluster must have a user ticket, and every node in a cluster must have a server ticket. volume A tree of files and directories grouped for the purpose of applying a policy or set of policies to all of them at once. Warden A data-fabric process that coordinates the starting and stopping of configured services on a node. YARN resource containers A unit of memory allocated for use by YARN to process each map or reduce task. ZooKeeper A coordination service for distributed applications. It provides a shared hierarchical namespace that is organized like a standard file system. access control expression (ACE) A Boolean expression that defines a combination of users, groups, or roles that have\n access to an object stored natively such as a directory, file, or HPE Ezmeral Data Fabric Database table. Access Control Expression (ACE) NOTE: An ACE (up to 64KB in length) is a combination of users, groups,\n and/or roles for whom access (to volume data) is defined using boolean expressions and\n sub expressions within single quotes. When you pass in an access type that has already\n been set, the new value replaces the existing value for that access type. There is no\n change to access types that are not passed in with the command, whether or not they were\n set. (Topic last modified: 2023-08-05) \u00a9Copyright 2024 Hewlett Packard Enterprise Development LP - Partners | Support | Dev-Hub | Community | Training | ALA | Privacy Policy | Glossary", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_access_control_expression__ace.html", + "title": "access control expression (ACE)" + }, + { + "content": "\naccess control list (ACL) Jump to main content Get Started Platform Administration Reference Home Glossary Definitions for commonly used terms in MapR Converged Data Platform\n environments. 
\naccess control list (ACL) A list of permissions attached to an object. An ACL specifies users or system processes that can perform specific actions on an object. NOTE: An Access Control List (ACL) is a list of users or groups. Each user or group in the list is paired with a defined set of permissions that limit the actions that the user or group can perform on the object secured by the ACL. In the HPE Ezmeral Data Fabric, the objects secured by ACLs are the job queue, volumes, and the cluster itself. (Topic last modified: 2023-08-05)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_access_control_list__acl.html", + "title": "access control list (ACL)" + }, + { + "content": "\naccess policy An ACL or policy in JSON format that describes user access. Grants accounts and IAM users permissions to perform resource operations, such as putting objects in a bucket. You associate access policies with accounts, users, buckets, and objects.
(Topic last modified: 2022-01-19)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/glossary-access-policy.html", + "title": "access policy" + }, + { + "content": "\nadministrator A user or users with special privileges to administer the cluster or cluster resources. Administrative functions can include managing hardware resources, users, data, services, security, and availability.
(Topic last modified: 2023-04-03)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_administrator.html", + "title": "administrator" + }, + { + "content": "\nadvisory quota An advisory disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the advisory quota, an alert is sent.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_advisory_quota.html", + "title": "advisory quota" + }, + { + "content": "\nair gap Physical isolation between a computer system and unsecured networks. To enhance security, air-gapped computer systems are disconnected from other systems and networks.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_air_gap.html", + "title": "air gap" + }, + { + "content": "\nchunk Files in the file system are split into chunks (similar to Hadoop blocks) that are normally 256 MB by default. Any multiple of 65,536 bytes is a valid chunk size, but tuning the size correctly is important. Files inherit the chunk size settings of the directory that contains them, as do subdirectories on which chunk size has not been explicitly set. Any files written by a Hadoop application, whether via the file APIs or over NFS, use the chunk size specified by the settings for the directory where the file is written.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_chunk.html", + "title": "chunk" + }, + { + "content": "\nclient node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as an \"edge node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. See also node and edge node.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_client_node.html", + "title": "client node" + }, + { + "content": "\ncluster admin The data-fabric user.
+        "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_cluster_admin.html",
+        "title": "cluster admin"
+    },
+    {
+        "content": "\ncompute node A compute node is used to process data using a compute engine (for example, YARN, Hive, Spark, or Drill). A compute node is by definition a data-fabric cluster node. See also node. Compare with data node.",
+        "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_compute_node.html",
+        "title": "compute node"
+    },
+    {
+        "content": "\ncontainer The unit of shared storage in a data-fabric cluster. Every container is either a name container or a data container. More information: Docker containers, YARN resource containers.",
+        "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_container.html",
+        "title": "container"
+    },
+    {
+        "content": "\ncontainer location database (CLDB) A service, running on one or more data-fabric nodes, that maintains the locations of services, containers, and other cluster information. NOTE: The CLDB service tracks the following information about every container in the file system: the node where the container is located, the size of the container, the volume to which the container belongs, and the policies, quotas, and usage for that volume. For more information about the CLDB, see the HPE Ezmeral Data Fabric \u2013 Customer Managed documentation topic: CLDB.",
+        "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_container_location_database__cldb.html",
+        "title": "container location database (CLDB)"
+    },
+    {
+        "content": "\ncore The minimum complement of software packages required to construct a data-fabric cluster. These packages include mapr-core, mapr-core-internal, mapr-cldb, mapr-apiserver, mapr-fileserver, mapr-zookeeper, and others. Note that ecosystem components are not part of core.
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_core.html",
+ "title": "core"
+ },
+ {
+ "content": "\ndata-access gateway A service that acts as a proxy and gateway for translating requests between lightweight client applications and the data-fabric cluster.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_mapr_data_access_gateway.html",
+ "title": "data-access gateway"
+ },
+ {
+ "content": "\ndata compaction A process that enables users to remove empty or deleted space in the database and to compact the database to occupy contiguous space. More information: log compaction.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_data_compaction.html",
+ "title": "data compaction"
+ },
+ {
+ "content": "\ndata container One of the two types of containers in a data-fabric cluster. Data containers typically have a cascaded configuration (master replicates to replica1, replica1 replicates to replica2, and so on). Every data container is either a master container, an intermediate container, or a tail container depending on its replication role.",
+ "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_data_container.html",
+ "title": "data container"
+ },
+ {
+ "content": "\ndata fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly.
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_data_fabric.html",
+    "title": "data fabric"
+  },
+  {
+    "content": "data-fabric administrator The \"data-fabric user.\" The user that cluster services run as (typically named mapr or hadoop) on each node. See data-fabric user.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_mapr_administrator.html",
+    "title": "data-fabric administrator"
+  },
+  {
+    "content": "data-fabric gateway A gateway that supports table and stream replication. The data-fabric gateway mediates one-way communication between a source data-fabric cluster and a destination cluster. The data-fabric gateway also applies updates from JSON tables to their secondary indexes and propagates Change Data Capture (CDC) logs.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_mapr_gateway.html",
+    "title": "data-fabric gateway"
+  },
+  {
+    "content": "data-fabric user The user that cluster services run as (typically named mapr or hadoop) on each node. The data-fabric user, also known as the \"data-fabric admin,\" has full privileges to administer the cluster. The administrative privilege, with varying levels of control, can be assigned to other users as well.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_mapr_user.html",
+    "title": "data-fabric user"
+  },
+  {
+    "content": "data node A data node has the function of storing data and always runs FileServer. A data node is by definition a data-fabric cluster node.
access control expression (ACE) A Boolean expression that defines a combination of users, groups, or roles that have access to an object stored natively such as a directory, file, or HPE Ezmeral Data Fabric Database table. access control list (ACL) A list of permissions attached to an object. An ACL specifies users or system processes that can perform specific actions on an object. access policy An ACL or policy in JSON format that describes user access. Grants accounts and IAM users permissions to perform resource operations, such as putting objects in a bucket. You associate access policies with accounts, users, buckets, and objects. administrator A user or users with special privileges to administer the cluster or cluster resources. Administrative functions can include managing hardware resources, users, data, services, security, and availability. advisory quota An advisory disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the advisory quota, an alert is sent. air gap Physical isolation between a computer system and unsecured networks. To enhance security, air-gapped computer systems are disconnected from other systems and networks. chunk Files in the file system are split into chunks (similar to Hadoop blocks) that are normally 256 MB by default. Any multiple of 65,536 bytes is a valid chunk size, but tuning the size correctly is important. Files inherit the chunk size settings of the directory that contains them, as do subdirectories on which chunk size has not been explicitly set. Any files written by a Hadoop application, whether via the file APIs or over NFS, use chunk size specified by the settings for the directory where the file is written. client node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as an \"edge node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. cluster admin The data-fabric user . compute node A compute node is used to process data using a compute engine (for example, YARN, Hive, Spark, or Drill). A compute node is by definition a data-fabric cluster node. container The unit of shared storage in a data-fabric cluster. Every container is either a name container or a data container. container location database (CLDB) A service, running on one or more data-fabric nodes, that maintains the locations of services, containers, and other cluster information. core The minimum complement of software packages required to construct a data-fabric cluster. These packages include mapr-core , mapr-core-internal , mapr-cldb , mapr-apiserver , mapr-fileserver , mapr-zookeeper , and others. Note that ecosystem components are not part of core. data-access gateway A service that acts as a proxy and gateway for translating requests between lightweight client applications and the data-fabric cluster. data compaction A process that enables users to remove empty or deleted space in the database and to compact the database to occupy contiguous space. data container One of the two types of containers in a data-fabric cluster. Data containers typically have a cascaded configuration (master replicates to replica1, replica1 replicates to replica2, and so on). Every data container is either a master container, an intermediate container, or a tail container depending on its replication role. data fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. 
A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. data-fabric administrator The \" data-fabric user.\" The user that cluster services run as (typically named mapr or hadoop ) on each node. data-fabric gateway A gateway that supports table and stream replication. The data-fabric gateway mediates one-way communication between a source data-fabric cluster and a destination cluster. The data-fabric gateway also applies updates from JSON tables to their secondary indexes and propagates Change Data Capture (CDC) logs. data-fabric user The user that cluster services run as (typically named mapr or hadoop ) on each node. The data-fabric user, also known as the \" data-fabric admin,\" has full privileges to administer the cluster. The administrative privilege, with varying levels of control, can be assigned to other users as well. data node A data node has the function of storing data and always runs FileServer. A data node is by definition a data-fabric cluster node. desired replication factor The number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. developer preview A label for a feature or collection of features that have usage restrictions. Developer previews are not tested for production environments, and should be used with caution. Docker containers The application containers used by Docker software. Docker is a leading proponent of OS virtualization using application containers (\"containerization\"). Domain Relates to Object Store. A domain is a management entity for accounts and users. The number of users, the amount of disk space, number of buckets in each of the accounts, total number of accounts, and the number of disabled accounts are all tracked within a domain. Currently, Object Store only supports the primary domain; you cannot create additional domains. Administrators can create multiple accounts in the primary domain. domain user Relates to Object Store. A domain user is a cluster security principal authenticated through AD/LDAP. Domain users only exist in the default account. Domain users can log in to the Object Store UI with their domain username and password\u200b. Ecosystem Pack (EEP) A selected set of stable, interoperable, and widely used components from the Hadoop ecosystem that are fully supported on the Data Fabric platform. edge cluster A small-footprint edition of the HPE Ezmeral Data Fabric designed to capture, process, and analyze IoT data close to the source of the data. edge node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as a \"client node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. filelet A filelet, also called an fid, is a 256MB shard of a file. A 1 GB file for instance is comprised of the following filelets: 64K (primary fid)+(256MB-64KB)+256MB+256MB+256MB. file system The NFS-mountable, distributed, high-performance HPE Ezmeral Data Fabric data-storage system. gateway node A node on which a mapr-gateway is installed. A gateway node is by definition a data-fabric cluster node. 
global namespace (GNS) The data plane that connects HPE Ezmeral Data Fabric deployments. The global namespace is a mechanism that aggregates disparate and remote data sources and provides a namespace that encompasses all of your infrastructure and deployments. Global namespace technology lets you manage globally deployed data as a single resource. Because of the global namespace, you can view and run multiple fabrics as a single, logical, and local fabric. The global namespace is designed to span multiple edge nodes, on-prem data centers, and clouds. heartbeat A signal sent by each FileServer and NFS node every second to provide information to the CLDB about the node's health and resource usage. IAM users Relates to Object Store. An IAM (Identity and Access Management) user represents an actual user or an application. An administrator creates IAM users in an Object Store account and assigns access policies to them to control user and application access to resources in the account. Installer A program that simplifies installation of the HPE Ezmeral Data Fabric. The Installer guides you through the process of installing a cluster with data-fabric services and ecosystem components. You can also use the Installer to update a previously installed cluster with additional nodes, services, and ecosystem components. And you can use the Installer to upgrade a cluster to a newer core version if the cluster was installed using the Installer or an Installer Stanza. log compaction A process that purges messages previously published to a topic partition, retaining the latest version. MAST Gateway A gateway that serves as a centralized entry point for all the operations that need to be performed on tiered storage. minimum replication factor The minimum number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. When the replication factor falls below this minimum, re-replication occurs as aggressively as possible to restore the replication level. If any containers in the CLDB volume fall below the minimum replication factor, writes are disabled until aggressive re-replication restores the minimum level of replication. mirror A replica of a volume. MOSS MOSS is the acronym for Multithreaded Object Store Server. name container A container in a data-fabric cluster that holds a volume's namespace information and file chunk locations, and the first 64 KB of each file in the volume. Network File System (NFS) A protocol that allows a user on a client computer to access files over a network as though they were stored locally. node An individual server (physical or virtual machine) in a cluster. NodeManager (NM) A data service that works with the ResourceManager to host the YARN resource containers that run on each data node. object File and metadata that describes the file. You upload an object into a bucket. You can then download, open, move, or delete the object. Object Store Object and metadata storage solution built into the HPE Ezmeral Data Fabric . Object Store efficiently stores data for fast access and leverages the capabilities of the patented HPE Ezmeral Data Fabric file system for performance, reliability, and scalability. policy server The service that manages security policies and composite IDs. quota A disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the quota, no more data can be written. replication factor The number of copies of a volume. 
replication role The replication role of a container determines how that container is replicated to other storage pools in the cluster. replication role balancer The replication role balancer is a tool that switches the replication roles of containers to ensure that every node has an equal share of of master and replica containers (for name containers) and an equal share of master, intermediate, and tail containers (for data containers). re-replication Re-replication occurs whenever the number of available replica containers drops below the number prescribed by that volume's replication factor. Re-replication may occur for a variety of reasons including replica container corruption, node unavailability, hard disk failure, or an increase in replication factor. ResourceManager (RM) A YARN service that manages cluster resources and schedules applications. role The service that the node runs in a cluster. You can use a node for one, or a combination of the following roles: CLDB, JobTracker, WebServer, ResourceManager, Zookeeper, FileServer, TaskTracker, NFS, and HBase. secret A Kubernetes object that holds sensitive information, such as passwords, tokens, and keys. Pods that require this sensitive information reference the secret in their pod definition. Secrets are the method Kubernetes uses to move sensitive data into pods. secure by default The HPE Ezmeral Data Fabric platform and supported ecosystem components are designed to implement security unless the user takes specific steps to turn off security options. schedule A group of rules that specify recurring points in time at which certain actions are determined to occur. snapshot A read-only logical image of a volume at a specific point in time. storage pool A unit of storage made up of one or more disks. By default, data-fabric storage pools contain two or three disks. For high-volume reads and writes, you can create larger storage pools when initially formatting storage during cluster creation. stripe width The number of disks in a storage pool. super group The group that has administrative access to the data-fabric cluster. super user The user that has administrative access to the data-fabric cluster. tagging Operation of applying a security policy to a resource. ticket In the data-fabric platform, a file that contains keys used to authenticate users and cluster servers. Tickets are created using the maprlogin or configure.sh utilities and are encrypted to protect their contents. Different types of tickets are provided for users and services. For example, every user who wants to access a cluster must have a user ticket, and every node in a cluster must have a server ticket. volume A tree of files and directories grouped for the purpose of applying a policy or set of policies to all of them at once. Warden A data-fabric process that coordinates the starting and stopping of configured services on a node. YARN resource containers A unit of memory allocated for use by YARN to process each map or reduce task. ZooKeeper A coordination service for distributed applications. It provides a shared hierarchical namespace that is organized like a standard file system. data node A data node has the function of storing data and always runs FileServer. A data node is\n by definition a data-fabric cluster node. See also node . Compare with compute node . 
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_data_node.html", + "title": "data node" + }, + { + "content": "\ndesired replication factor The number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. When the number of copies falls below the desired replication factor, but remains equal to or above the minimum replication factor, re-replication occurs after the timeout specified in the cldb.fs.mark.rereplicate.sec parameter.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_desired_replication_factor.html", + "title": "desired replication factor" + }, + { + "content": "\ndeveloper preview A label for a feature or collection of features that have usage restrictions. Developer previews are not tested for production environments, and should be used with caution.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_developer_preview.html", + "title": "developer preview" + }, + { + "content": "\nDocker containers The application containers used by Docker software. Docker is a leading proponent of OS virtualization using application containers (\"containerization\").
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_docker_containers.html",
+    "title": "Docker containers"
+  },
+  {
+    "content": "\nDomain Relates to Object Store. A domain is a management entity for accounts and users. The number of users, the amount of disk space, number of buckets in each of the accounts, total number of accounts, and the number of disabled accounts are all tracked within a domain. Currently, Object Store only supports the primary domain; you cannot create additional domains. Administrators can create multiple accounts in the primary domain.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/glossary-domain.html",
+    "title": "Domain"
+  },
+  {
+    "content": "\ndomain user Relates to Object Store. A domain user is a cluster security principal authenticated through AD/LDAP. Domain users only exist in the default account. Domain users can log in to the Object Store UI with their domain username and password.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/glossary-domain-user.html",
+    "title": "domain user"
+  },
+  {
+    "content": "\nEcosystem Pack (EEP) A selected set of stable, interoperable, and widely used components from the Hadoop ecosystem that are fully supported on the Data Fabric platform.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_ezmeral_ecosystem_pack.html",
+    "title": "Ecosystem Pack (EEP)"
+  },
+  {
+    "content": "\nedge cluster A small-footprint edition of the HPE Ezmeral Data Fabric designed to capture, process, and analyze IoT data close to the source of the data.",
Different types of tickets are provided for users and services. For example, every user who wants to access a cluster must have a user ticket, and every node in a cluster must have a server ticket. volume A tree of files and directories grouped for the purpose of applying a policy or set of policies to all of them at once. Warden A data-fabric process that coordinates the starting and stopping of configured services on a node. YARN resource containers A unit of memory allocated for use by YARN to process each map or reduce task. ZooKeeper A coordination service for distributed applications. It provides a shared hierarchical namespace that is organized like a standard file system. edge cluster A small-footprint edition of the HPE Ezmeral Data Fabric designed to capture, process,\n and analyze IoT data close to the source of the data. (Topic last modified: 2023-04-29) \u00a9Copyright 2024 Hewlett Packard Enterprise Development LP - Partners | Support | Dev-Hub | Community | Training | ALA | Privacy Policy | Glossary", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_edge_cluster.html", + "title": "edge cluster" + }, + { + "content": "\nedge node Jump to main content Get Started Platform Administration Reference Home Glossary Definitions for commonly used terms in MapR Converged Data Platform\n environments. edge node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as a \"client node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. HPE Ezmeral Data Fabric 7.6 Documentation Search current doc version Glossary Definitions for commonly used terms in MapR Converged Data Platform\n environments. .snapshot A special directory in the top level of each volume that contains all the snapshots created or preserved for the volume. access control expression (ACE) A Boolean expression that defines a combination of users, groups, or roles that have access to an object stored natively such as a directory, file, or HPE Ezmeral Data Fabric Database table. access control list (ACL) A list of permissions attached to an object. An ACL specifies users or system processes that can perform specific actions on an object. access policy An ACL or policy in JSON format that describes user access. Grants accounts and IAM users permissions to perform resource operations, such as putting objects in a bucket. You associate access policies with accounts, users, buckets, and objects. administrator A user or users with special privileges to administer the cluster or cluster resources. Administrative functions can include managing hardware resources, users, data, services, security, and availability. advisory quota An advisory disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the advisory quota, an alert is sent. air gap Physical isolation between a computer system and unsecured networks. To enhance security, air-gapped computer systems are disconnected from other systems and networks. chunk Files in the file system are split into chunks (similar to Hadoop blocks) that are normally 256 MB by default. Any multiple of 65,536 bytes is a valid chunk size, but tuning the size correctly is important. Files inherit the chunk size settings of the directory that contains them, as do subdirectories on which chunk size has not been explicitly set. 
+      "content": "\nedge node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as a \"client node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. See also node and client node. (Topic last modified: 2022-06-28)",
+      "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_edge_node.html",
+      "title": "edge node"
+    },
+    {
+      "content": "\nfabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. (Topic last modified: 2023-09-26)",
+      "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_fabric.html",
+      "title": "fabric"
+    },
+    {
+      "content": "\nfilelet A filelet, also called an fid, is a 256MB shard of a file. A 1 GB file for instance is comprised of the following filelets: 64K (primary fid)+(256MB-64KB)+256MB+256MB+256MB. (Topic last modified: 2020-07-13)",
+      "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_filelet.html",
+      "title": "filelet"
+    },
+    {
+      "content": "\nfile system The NFS-mountable, distributed, high-performance HPE Ezmeral Data Fabric data-storage system. (Topic last modified: 2023-04-03)",
+      "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_mapr_fs.html",
+      "title": "file system"
+    },
+    {
+        "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_gateway_node.html",
+        "title": "gateway node"
+    },
+    {
+        "content": "global namespace (GNS): The data plane that connects HPE Ezmeral Data Fabric deployments. The global namespace is a mechanism that aggregates disparate and remote data sources and provides a namespace that encompasses all of your infrastructure and deployments. Global namespace technology lets you manage globally deployed data as a single resource. Because of the global namespace, you can view and run multiple fabrics as a single, logical, and local fabric. The global namespace is designed to span multiple edge nodes, on-prem data centers, and clouds.",
+        "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_global_namespace.html",
+        "title": "global namespace (GNS)"
+    },
+    {
+        "content": "heartbeat: A signal sent by each FileServer and NFS node every second to provide information to the CLDB about the node's health and resource usage.",
+        "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_heartbeat.html",
+        "title": "heartbeat"
+    },
+    {
+        "content": "IAM users: Relates to Object Store. An IAM (Identity and Access Management) user represents an actual user or an application. An administrator creates IAM users in an Object Store account and assigns access policies to them to control user and application access to resources in the account.",
air gap Physical isolation between a computer system and unsecured networks. To enhance security, air-gapped computer systems are disconnected from other systems and networks. chunk Files in the file system are split into chunks (similar to Hadoop blocks) that are normally 256 MB by default. Any multiple of 65,536 bytes is a valid chunk size, but tuning the size correctly is important. Files inherit the chunk size settings of the directory that contains them, as do subdirectories on which chunk size has not been explicitly set. Any files written by a Hadoop application, whether via the file APIs or over NFS, use chunk size specified by the settings for the directory where the file is written. client node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as an \"edge node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. cluster admin The data-fabric user . compute node A compute node is used to process data using a compute engine (for example, YARN, Hive, Spark, or Drill). A compute node is by definition a data-fabric cluster node. container The unit of shared storage in a data-fabric cluster. Every container is either a name container or a data container. container location database (CLDB) A service, running on one or more data-fabric nodes, that maintains the locations of services, containers, and other cluster information. core The minimum complement of software packages required to construct a data-fabric cluster. These packages include mapr-core , mapr-core-internal , mapr-cldb , mapr-apiserver , mapr-fileserver , mapr-zookeeper , and others. Note that ecosystem components are not part of core. data-access gateway A service that acts as a proxy and gateway for translating requests between lightweight client applications and the data-fabric cluster. data compaction A process that enables users to remove empty or deleted space in the database and to compact the database to occupy contiguous space. data container One of the two types of containers in a data-fabric cluster. Data containers typically have a cascaded configuration (master replicates to replica1, replica1 replicates to replica2, and so on). Every data container is either a master container, an intermediate container, or a tail container depending on its replication role. data fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. data-fabric administrator The \" data-fabric user.\" The user that cluster services run as (typically named mapr or hadoop ) on each node. data-fabric gateway A gateway that supports table and stream replication. The data-fabric gateway mediates one-way communication between a source data-fabric cluster and a destination cluster. The data-fabric gateway also applies updates from JSON tables to their secondary indexes and propagates Change Data Capture (CDC) logs. data-fabric user The user that cluster services run as (typically named mapr or hadoop ) on each node. The data-fabric user, also known as the \" data-fabric admin,\" has full privileges to administer the cluster. The administrative privilege, with varying levels of control, can be assigned to other users as well. data node A data node has the function of storing data and always runs FileServer. 
A data node is by definition a data-fabric cluster node. desired replication factor The number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. developer preview A label for a feature or collection of features that have usage restrictions. Developer previews are not tested for production environments, and should be used with caution. Docker containers The application containers used by Docker software. Docker is a leading proponent of OS virtualization using application containers (\"containerization\"). Domain Relates to Object Store. A domain is a management entity for accounts and users. The number of users, the amount of disk space, number of buckets in each of the accounts, total number of accounts, and the number of disabled accounts are all tracked within a domain. Currently, Object Store only supports the primary domain; you cannot create additional domains. Administrators can create multiple accounts in the primary domain. domain user Relates to Object Store. A domain user is a cluster security principal authenticated through AD/LDAP. Domain users only exist in the default account. Domain users can log in to the Object Store UI with their domain username and password\u200b. Ecosystem Pack (EEP) A selected set of stable, interoperable, and widely used components from the Hadoop ecosystem that are fully supported on the Data Fabric platform. edge cluster A small-footprint edition of the HPE Ezmeral Data Fabric designed to capture, process, and analyze IoT data close to the source of the data. edge node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as a \"client node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. filelet A filelet, also called an fid, is a 256MB shard of a file. A 1 GB file for instance is comprised of the following filelets: 64K (primary fid)+(256MB-64KB)+256MB+256MB+256MB. file system The NFS-mountable, distributed, high-performance HPE Ezmeral Data Fabric data-storage system. gateway node A node on which a mapr-gateway is installed. A gateway node is by definition a data-fabric cluster node. global namespace (GNS) The data plane that connects HPE Ezmeral Data Fabric deployments. The global namespace is a mechanism that aggregates disparate and remote data sources and provides a namespace that encompasses all of your infrastructure and deployments. Global namespace technology lets you manage globally deployed data as a single resource. Because of the global namespace, you can view and run multiple fabrics as a single, logical, and local fabric. The global namespace is designed to span multiple edge nodes, on-prem data centers, and clouds. heartbeat A signal sent by each FileServer and NFS node every second to provide information to the CLDB about the node's health and resource usage. IAM users Relates to Object Store. An IAM (Identity and Access Management) user represents an actual user or an application. An administrator creates IAM users in an Object Store account and assigns access policies to them to control user and application access to resources in the account. 
Installer A program that simplifies installation of the HPE Ezmeral Data Fabric. The Installer guides you through the process of installing a cluster with data-fabric services and ecosystem components. You can also use the Installer to update a previously installed cluster with additional nodes, services, and ecosystem components. And you can use the Installer to upgrade a cluster to a newer core version if the cluster was installed using the Installer or an Installer Stanza. log compaction A process that purges messages previously published to a topic partition, retaining the latest version. MAST Gateway A gateway that serves as a centralized entry point for all the operations that need to be performed on tiered storage. minimum replication factor The minimum number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. When the replication factor falls below this minimum, re-replication occurs as aggressively as possible to restore the replication level. If any containers in the CLDB volume fall below the minimum replication factor, writes are disabled until aggressive re-replication restores the minimum level of replication. mirror A replica of a volume. MOSS MOSS is the acronym for Multithreaded Object Store Server. name container A container in a data-fabric cluster that holds a volume's namespace information and file chunk locations, and the first 64 KB of each file in the volume. Network File System (NFS) A protocol that allows a user on a client computer to access files over a network as though they were stored locally. node An individual server (physical or virtual machine) in a cluster. NodeManager (NM) A data service that works with the ResourceManager to host the YARN resource containers that run on each data node. object File and metadata that describes the file. You upload an object into a bucket. You can then download, open, move, or delete the object. Object Store Object and metadata storage solution built into the HPE Ezmeral Data Fabric . Object Store efficiently stores data for fast access and leverages the capabilities of the patented HPE Ezmeral Data Fabric file system for performance, reliability, and scalability. policy server The service that manages security policies and composite IDs. quota A disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the quota, no more data can be written. replication factor The number of copies of a volume. replication role The replication role of a container determines how that container is replicated to other storage pools in the cluster. replication role balancer The replication role balancer is a tool that switches the replication roles of containers to ensure that every node has an equal share of of master and replica containers (for name containers) and an equal share of master, intermediate, and tail containers (for data containers). re-replication Re-replication occurs whenever the number of available replica containers drops below the number prescribed by that volume's replication factor. Re-replication may occur for a variety of reasons including replica container corruption, node unavailability, hard disk failure, or an increase in replication factor. ResourceManager (RM) A YARN service that manages cluster resources and schedules applications. role The service that the node runs in a cluster. 
+ { + "content": "\nInstaller A program that simplifies installation of the HPE Ezmeral Data Fabric. The Installer guides you through the process of installing a cluster with data-fabric services and ecosystem components. You can also use the Installer to update a previously installed cluster with additional nodes, services, and ecosystem components. And you can use the Installer to upgrade a cluster to a newer core version if the cluster was installed using the Installer or an Installer Stanza. (Topic last modified: 2021-07-15)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_installer.html", + "title": "Installer" + },
+ { + "content": "\nlog compaction A process that purges messages previously published to a topic partition, retaining the latest version. More information: data compaction. (Topic last modified: 2019-02-27)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_log_compaction.html", + "title": "log compaction" + },
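A minimal sketch of the retention rule that log compaction describes, keeping only the latest value published for each key. This is an illustrative toy in Python, not the platform's actual implementation:

```python
def compact(messages):
    """Toy log compaction: keep only the latest value published for each key.

    `messages` is an iterable of (key, value) pairs in publish order; the
    result preserves the order in which each surviving key last appeared.
    """
    latest = {}
    for key, value in messages:
        latest.pop(key, None)  # drop the stale copy so re-insertion moves the key last
        latest[key] = value
    return list(latest.items())

# Example: three versions of "sensor-1" collapse to the newest one.
log = [("sensor-1", 10), ("sensor-2", 7), ("sensor-1", 12), ("sensor-1", 15)]
assert compact(log) == [("sensor-2", 7), ("sensor-1", 15)]
```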
+ { + "content": "\nMAST Gateway A gateway that serves as a centralized entry point for all the operations that need to be performed on tiered storage. (Topic last modified: 2023-04-03)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_mast_gateway.html", + "title": "MAST Gateway" + },
+ { + "content": "\nminimum replication factor The minimum number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. When the replication factor falls below this minimum, re-replication occurs as aggressively as possible to restore the replication level. If any containers in the CLDB volume fall below the minimum replication factor, writes are disabled until aggressive re-replication restores the minimum level of replication. (Topic last modified: 2020-07-13)", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_minimum_replication_factor.html", + "title": "minimum replication factor" + },
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_minimum_replication_factor.html",
+    "title": "minimum replication factor"
+  },
+  {
+    "content": "\nmirror A replica of a volume. (Topic last modified: 2023-04-19)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_mirror.html",
+    "title": "mirror"
+  },
+  {
+    "content": "\nMOSS MOSS is the acronym for Multithreaded Object Store Server. (Topic last modified: 2022-05-19)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/glossary_moss.html",
+    "title": "MOSS"
+  },
+  {
+    "content": "\nname container A container in a data-fabric cluster that holds a volume's namespace information and file chunk locations, and the first 64 KB of each file in the volume. (Topic last modified: 2020-07-13)",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_name_container.html",
+    "title": "name container"
+  },
+  {
+    "content": "\nNetwork File System (NFS) A protocol that allows a user on a client computer to access files over a network as though they were stored locally.",
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_network_file_system__nfs.html", + "title": "Network File System (NFS)" + }, + { + "content": "node An individual server (physical or virtual machine) in a cluster.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_node.html", + "title": "node" + }, + { + "content": "NodeManager (NM) A data service that works with the ResourceManager to host the YARN resource containers that run on each data node.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_nodemanager.html", + "title": "NodeManager (NM)" + }, + { + "content": "object File and metadata that describes the file. You upload an object into a bucket. You can then download, open, move, or delete the object.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/glossary-object.html", + "title": "object" + }, + { + "content": "Object Store Object and metadata storage solution built into the HPE Ezmeral Data Fabric. Object Store efficiently stores data for fast access and leverages the capabilities of the patented HPE Ezmeral Data Fabric file system for performance, reliability, and scalability.
access control list (ACL) A list of permissions attached to an object. An ACL specifies users or system processes that can perform specific actions on an object. access policy An ACL or policy in JSON format that describes user access. Grants accounts and IAM users permissions to perform resource operations, such as putting objects in a bucket. You associate access policies with accounts, users, buckets, and objects. administrator A user or users with special privileges to administer the cluster or cluster resources. Administrative functions can include managing hardware resources, users, data, services, security, and availability. advisory quota An advisory disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the advisory quota, an alert is sent. air gap Physical isolation between a computer system and unsecured networks. To enhance security, air-gapped computer systems are disconnected from other systems and networks. chunk Files in the file system are split into chunks (similar to Hadoop blocks) that are normally 256 MB by default. Any multiple of 65,536 bytes is a valid chunk size, but tuning the size correctly is important. Files inherit the chunk size settings of the directory that contains them, as do subdirectories on which chunk size has not been explicitly set. Any files written by a Hadoop application, whether via the file APIs or over NFS, use chunk size specified by the settings for the directory where the file is written. client node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as an \"edge node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. cluster admin The data-fabric user . compute node A compute node is used to process data using a compute engine (for example, YARN, Hive, Spark, or Drill). A compute node is by definition a data-fabric cluster node. container The unit of shared storage in a data-fabric cluster. Every container is either a name container or a data container. container location database (CLDB) A service, running on one or more data-fabric nodes, that maintains the locations of services, containers, and other cluster information. core The minimum complement of software packages required to construct a data-fabric cluster. These packages include mapr-core , mapr-core-internal , mapr-cldb , mapr-apiserver , mapr-fileserver , mapr-zookeeper , and others. Note that ecosystem components are not part of core. data-access gateway A service that acts as a proxy and gateway for translating requests between lightweight client applications and the data-fabric cluster. data compaction A process that enables users to remove empty or deleted space in the database and to compact the database to occupy contiguous space. data container One of the two types of containers in a data-fabric cluster. Data containers typically have a cascaded configuration (master replicates to replica1, replica1 replicates to replica2, and so on). Every data container is either a master container, an intermediate container, or a tail container depending on its replication role. data fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. 
data-fabric administrator The \"data-fabric user.\" The user that cluster services run as (typically named mapr or hadoop) on each node. data-fabric gateway A gateway that supports table and stream replication. The data-fabric gateway mediates one-way communication between a source data-fabric cluster and a destination cluster. The data-fabric gateway also applies updates from JSON tables to their secondary indexes and propagates Change Data Capture (CDC) logs. data-fabric user The user that cluster services run as (typically named mapr or hadoop) on each node. The data-fabric user, also known as the \"data-fabric admin,\" has full privileges to administer the cluster. The administrative privilege, with varying levels of control, can be assigned to other users as well. data node A data node has the function of storing data and always runs FileServer. A data node is by definition a data-fabric cluster node. desired replication factor The number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. developer preview A label for a feature or collection of features that have usage restrictions. Developer previews are not tested for production environments, and should be used with caution. Docker containers The application containers used by Docker software. Docker is a leading proponent of OS virtualization using application containers (\"containerization\"). Domain Relates to Object Store. A domain is a management entity for accounts and users. The number of users, the amount of disk space, the number of buckets in each of the accounts, the total number of accounts, and the number of disabled accounts are all tracked within a domain. Currently, Object Store only supports the primary domain; you cannot create additional domains. Administrators can create multiple accounts in the primary domain. domain user Relates to Object Store. A domain user is a cluster security principal authenticated through AD/LDAP. Domain users only exist in the default account. Domain users can log in to the Object Store UI with their domain username and password. Ecosystem Pack (EEP) A selected set of stable, interoperable, and widely used components from the Hadoop ecosystem that are fully supported on the Data Fabric platform. edge cluster A small-footprint edition of the HPE Ezmeral Data Fabric designed to capture, process, and analyze IoT data close to the source of the data. edge node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as a \"client node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. filelet A filelet, also called an fid, is a 256MB shard of a file. A 1GB file, for instance, consists of the following filelets: 64KB (primary fid) + (256MB - 64KB) + 256MB + 256MB + 256MB. file system The NFS-mountable, distributed, high-performance HPE Ezmeral Data Fabric data-storage system. gateway node A node on which a mapr-gateway is installed. A gateway node is by definition a data-fabric cluster node. global namespace (GNS) The data plane that connects HPE Ezmeral Data Fabric deployments. The global namespace is a mechanism that aggregates disparate and remote data sources and provides a namespace that encompasses all of your infrastructure and deployments. Global namespace technology lets you manage globally deployed data as a single resource. Because of the global namespace, you can view and run multiple fabrics as a single, logical, and local fabric. The global namespace is designed to span multiple edge nodes, on-prem data centers, and clouds. heartbeat A signal sent by each FileServer and NFS node every second to provide information to the CLDB about the node's health and resource usage. IAM users Relates to Object Store. An IAM (Identity and Access Management) user represents an actual user or an application. An administrator creates IAM users in an Object Store account and assigns access policies to them to control user and application access to resources in the account. Installer A program that simplifies installation of the HPE Ezmeral Data Fabric. The Installer guides you through the process of installing a cluster with data-fabric services and ecosystem components. You can also use the Installer to update a previously installed cluster with additional nodes, services, and ecosystem components, and to upgrade a cluster to a newer core version if the cluster was installed using the Installer or an Installer Stanza. log compaction A process that purges messages previously published to a topic partition, retaining the latest version. MAST Gateway A gateway that serves as a centralized entry point for all the operations that need to be performed on tiered storage. minimum replication factor The minimum number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. When the replication factor falls below this minimum, re-replication occurs as aggressively as possible to restore the replication level. If any containers in the CLDB volume fall below the minimum replication factor, writes are disabled until aggressive re-replication restores the minimum level of replication. mirror A replica of a volume. MOSS The acronym for Multithreaded Object Store Server. name container A container in a data-fabric cluster that holds a volume's namespace information and file chunk locations, and the first 64 KB of each file in the volume. Network File System (NFS) A protocol that allows a user on a client computer to access files over a network as though they were stored locally. node An individual server (physical or virtual machine) in a cluster. NodeManager (NM) A data service that works with the ResourceManager to host the YARN resource containers that run on each data node. object File and metadata that describes the file. You upload an object into a bucket. You can then download, open, move, or delete the object. Object Store Object and metadata storage solution built into the HPE Ezmeral Data Fabric. Object Store efficiently stores data for fast access and leverages the capabilities of the patented HPE Ezmeral Data Fabric file system for performance, reliability, and scalability. policy server The service that manages security policies and composite IDs. quota A disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the quota, no more data can be written. replication factor The number of copies of a volume. replication role The replication role of a container determines how that container is replicated to other storage pools in the cluster. replication role balancer The replication role balancer is a tool that switches the replication roles of containers to ensure that every node has an equal share of master and replica containers (for name containers) and an equal share of master, intermediate, and tail containers (for data containers). re-replication Re-replication occurs whenever the number of available replica containers drops below the number prescribed by that volume's replication factor. Re-replication may occur for a variety of reasons, including replica container corruption, node unavailability, hard disk failure, or an increase in replication factor. ResourceManager (RM) A YARN service that manages cluster resources and schedules applications. role The service that the node runs in a cluster. You can use a node for one or a combination of the following roles: CLDB, JobTracker, WebServer, ResourceManager, ZooKeeper, FileServer, TaskTracker, NFS, and HBase. secret A Kubernetes object that holds sensitive information, such as passwords, tokens, and keys. Pods that require this sensitive information reference the secret in their pod definition. Secrets are the method Kubernetes uses to move sensitive data into pods. secure by default The HPE Ezmeral Data Fabric platform and supported ecosystem components are designed to implement security unless the user takes specific steps to turn off security options. schedule A group of rules that specify recurring points in time at which certain actions occur. snapshot A read-only logical image of a volume at a specific point in time. storage pool A unit of storage made up of one or more disks. By default, data-fabric storage pools contain two or three disks. For high-volume reads and writes, you can create larger storage pools when initially formatting storage during cluster creation. stripe width The number of disks in a storage pool. super group The group that has administrative access to the data-fabric cluster. super user The user that has administrative access to the data-fabric cluster. tagging Operation of applying a security policy to a resource. ticket In the data-fabric platform, a file that contains keys used to authenticate users and cluster servers. Tickets are created using the maprlogin or configure.sh utilities and are encrypted to protect their contents. Different types of tickets are provided for users and services. For example, every user who wants to access a cluster must have a user ticket, and every node in a cluster must have a server ticket. volume A tree of files and directories grouped for the purpose of applying a policy or set of policies to all of them at once. Warden A data-fabric process that coordinates the starting and stopping of configured services on a node. YARN resource containers A unit of memory allocated for use by YARN to process each map or reduce task. ZooKeeper A coordination service for distributed applications. It provides a shared hierarchical namespace that is organized like a standard file system. Object Store: Object and metadata storage solution built into the HPE Ezmeral Data Fabric. Object Store efficiently stores data for fast access and leverages the capabilities of the patented HPE Ezmeral Data Fabric file system for performance, reliability, and scalability.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss-object-store.html",
+    "title": "Object Store"
+  },
+  {
+    "content": "policy server: The service that manages security policies and composite IDs.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_policy_server.html",
+    "title": "policy server"
+  },
+  {
+    "content": "quota: A disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the quota, no more data can be written.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_quota.html",
+    "title": "quota"
+  },
+  {
+    "content": "replication factor: The number of copies of a volume.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_replication_factor.html",
+    "title": "replication factor"
+  },
+  {
+    "content": "replication role: The replication role of a container determines how that container is replicated to other storage pools in the cluster. A name container may have one of two replication roles: master or replica. A data container may have one of three replication roles: master, intermediate, or tail.",
replication role: The replication role of a container determines how that container is replicated to other storage pools in the cluster. A name container may have one of two replication roles: master or replica. A data container may have one of three replication roles: master, intermediate, or tail.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_replication_role.html",
+    "title": "replication role"
+  },
+  {
+    "content": "replication role balancer: The replication role balancer is a tool that switches the replication roles of containers to ensure that every node has an equal share of master and replica containers (for name containers) and an equal share of master, intermediate, and tail containers (for data containers).",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_replication_role_balancer.html",
+    "title": "replication role balancer"
+  },
+  {
+    "content": "re-replication: Re-replication occurs whenever the number of available replica containers drops below the number prescribed by that volume's replication factor. Re-replication may occur for a variety of reasons including replica container corruption, node unavailability, hard disk failure, or an increase in replication factor.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_re_replication.html",
+    "title": "re-replication"
+  },
+  {
+    "content": "ResourceManager (RM): A YARN service that manages cluster resources and schedules applications.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_resourcemanager.html",
+    "title": "ResourceManager (RM)"
+  },
+  {
+    "content": "role: The service that the node runs in a cluster. You can use a node for one, or a combination of the following roles: CLDB, JobTracker, WebServer, ResourceManager, Zookeeper, FileServer, TaskTracker, NFS, and HBase.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_role.html",
+    "title": "role"
+  },
Different types of tickets are provided for users and services. For example, every user who wants to access a cluster must have a user ticket, and every node in a cluster must have a server ticket. volume A tree of files and directories grouped for the purpose of applying a policy or set of policies to all of them at once. Warden A data-fabric process that coordinates the starting and stopping of configured services on a node. YARN resource containers A unit of memory allocated for use by YARN to process each map or reduce task. ZooKeeper A coordination service for distributed applications. It provides a shared hierarchical namespace that is organized like a standard file system. role The service that the node runs in a cluster. You can use a node for one, or a combination\n of the following roles: CLDB, JobTracker, WebServer, ResourceManager, Zookeeper,\n FileServer, TaskTracker, NFS, and HBase. (Topic last modified: 2020-09-17) \u00a9Copyright 2024 Hewlett Packard Enterprise Development LP - Partners | Support | Dev-Hub | Community | Training | ALA | Privacy Policy | Glossary", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_role.html", + "title": "role" + }, + { + "content": "\nsecret Jump to main content Get Started Platform Administration Reference Home Glossary Definitions for commonly used terms in MapR Converged Data Platform\n environments. secret A Kubernetes object that holds sensitive information, such as passwords, tokens, and keys. Pods that require this sensitive information reference the secret in their pod definition. Secrets are the method Kubernetes uses to move sensitive data into pods. HPE Ezmeral Data Fabric 7.6 Documentation Search current doc version Glossary Definitions for commonly used terms in MapR Converged Data Platform\n environments. .snapshot A special directory in the top level of each volume that contains all the snapshots created or preserved for the volume. access control expression (ACE) A Boolean expression that defines a combination of users, groups, or roles that have access to an object stored natively such as a directory, file, or HPE Ezmeral Data Fabric Database table. access control list (ACL) A list of permissions attached to an object. An ACL specifies users or system processes that can perform specific actions on an object. access policy An ACL or policy in JSON format that describes user access. Grants accounts and IAM users permissions to perform resource operations, such as putting objects in a bucket. You associate access policies with accounts, users, buckets, and objects. administrator A user or users with special privileges to administer the cluster or cluster resources. Administrative functions can include managing hardware resources, users, data, services, security, and availability. advisory quota An advisory disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the advisory quota, an alert is sent. air gap Physical isolation between a computer system and unsecured networks. To enhance security, air-gapped computer systems are disconnected from other systems and networks. chunk Files in the file system are split into chunks (similar to Hadoop blocks) that are normally 256 MB by default. Any multiple of 65,536 bytes is a valid chunk size, but tuning the size correctly is important. Files inherit the chunk size settings of the directory that contains them, as do subdirectories on which chunk size has not been explicitly set. 
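The secret entry above is the one piece of Kubernetes-specific terminology in this glossary. For readers who have not handled secrets programmatically, here is a minimal sketch using the official `kubernetes` Python client; the secret name, namespace, and key/value pair are hypothetical, and this illustrates the concept rather than anything the Data Fabric documentation prescribes:

```python
# Minimal sketch: creating a Kubernetes Secret with the official Python
# client. The name, namespace, and key/value pair are hypothetical.
from kubernetes import client, config

config.load_kube_config()  # assumes a reachable cluster and a valid kubeconfig

secret = client.V1Secret(
    metadata=client.V1ObjectMeta(name="demo-secret"),
    string_data={"db-password": "s3cr3t"},  # stored base64-encoded as .data
)

client.CoreV1Api().create_namespaced_secret(namespace="default", body=secret)
```

A pod would then reference `demo-secret` by name in its pod definition, which is exactly the flow the glossary entry describes.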
+  {
+    "content": "secure by default The HPE Ezmeral Data Fabric platform and supported ecosystem components are designed to implement security unless the user takes specific steps to turn off security options.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_secure_by_default.html",
+    "title": "secure by default"
+  },
+  {
+    "content": "schedule A group of rules that specify recurring points in time at which certain actions are determined to occur.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_schedule.html",
+    "title": "schedule"
+  },
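The schedule definition is deliberately abstract ("recurring points in time"). As a purely conceptual sketch — not the Data Fabric scheduler, which is configured through the platform itself — a schedule rule can be read as a function from "now" to the next time it fires:

```python
# Conceptual sketch of a schedule rule, not the Data Fabric scheduler:
# a rule maps the current time to the next point in time it fires.
from datetime import datetime, timedelta

def next_daily_run(hour: int, now: datetime) -> datetime:
    """Next occurrence of a rule like 'every day at <hour>:00'."""
    candidate = now.replace(hour=hour, minute=0, second=0, microsecond=0)
    if candidate <= now:
        candidate += timedelta(days=1)  # today's slot already passed
    return candidate

print(next_daily_run(2, datetime(2024, 5, 1, 14, 30)))  # 2024-05-02 02:00:00
```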
+  {
+    "content": "snapshot A read-only logical image of a volume at a specific point in time.",
+    "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_snapshot.html",
+    "title": "snapshot"
+  },
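To make the snapshot entry concrete, here is a toy illustration of "read-only image at a point in time" in plain Python; this is only an analogy, not the mechanism the Data Fabric file system actually uses:

```python
# Toy illustration of a read-only, point-in-time view of changing state
# (an analogy only; not how the file system implements snapshots).
from types import MappingProxyType

volume = {"file_a": "v1", "file_b": "v1"}   # the live, writable volume
snapshot = MappingProxyType(dict(volume))   # frozen point-in-time view

volume["file_a"] = "v2"                     # live data keeps changing
assert snapshot["file_a"] == "v1"           # the snapshot still shows v1

try:
    snapshot["file_b"] = "v3"               # writes against the view fail
except TypeError:
    print("snapshots are read-only")
```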
+  {
+    "content": "storage pool A unit of storage made up of one or more disks. By default, data-fabric storage pools contain two or three disks. For high-volume reads and writes, you can create larger storage pools when initially formatting storage during cluster creation. .snapshot A special directory in the top level of each volume that contains all the snapshots created or preserved for the volume. access control expression (ACE) A Boolean expression that defines a combination of users, groups, or roles that have access to an object stored natively such as a directory, file, or HPE Ezmeral Data Fabric Database table. access control list (ACL) A list of permissions attached to an object. An ACL specifies users or system processes that can perform specific actions on an object. access policy An ACL or policy in JSON format that describes user access. Grants accounts and IAM users permissions to perform resource operations, such as putting objects in a bucket. You associate access policies with accounts, users, buckets, and objects. administrator A user or users with special privileges to administer the cluster or cluster resources. Administrative functions can include managing hardware resources, users, data, services, security, and availability. advisory quota An advisory disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the advisory quota, an alert is sent. air gap Physical isolation between a computer system and unsecured networks. To enhance security, air-gapped computer systems are disconnected from other systems and networks. chunk Files in the file system are split into chunks (similar to Hadoop blocks) that are normally 256 MB by default. Any multiple of 65,536 bytes is a valid chunk size, but tuning the size correctly is important. Files inherit the chunk size settings of the directory that contains them, as do subdirectories on which chunk size has not been explicitly set. Any files written by a Hadoop application, whether via the file APIs or over NFS, use chunk size specified by the settings for the directory where the file is written.
A unit of storage made up of one or more disks. By default, data-fabric storage pools contain two or three disks. For high-volume reads and writes, you can create larger storage pools when initially formatting storage during cluster creation. NOTE: Storage pool refers to the combined storage capacity obtained by combining one or more storage devices. Storage devices can be anything from a very small disk drive to large arrays of disk drives (each containing 20-30 drives). A storage pool is created to make a very large capacity (GBs/TBs/PBs) available, from which users are provided the amounts of storage they need. For example, one can combine 10 hard disk drives of 4TB each, totaling 40TB. One can then either use the 40TB directly as a single device or partition the space into many smaller capacities, such as 100GB or 1TB, and provide that access to different users.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_storage_pool.html", + "title": "storage pool" + }, + { + "content": "\nstripe width The number of disks in a storage pool. See storage pool.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_stripe_width.html", + "title": "stripe width" + }, + { + "content": "\nsuper group The group that has administrative access to the data-fabric cluster.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_super_group.html", + "title": "super group" + }, + { + "content": "\nsuper user The user that has administrative access to the data-fabric cluster.
snapshot A read-only logical image of a volume at a specific point in time. storage pool A unit of storage made up of one or more disks. By default, data-fabric storage pools contain two or three disks. For high-volume reads and writes, you can create larger storage pools when initially formatting storage during cluster creation. stripe width The number of disks in a storage pool. super group The group that has administrative access to the data-fabric cluster. super user The user that has administrative access to the data-fabric cluster. tagging Operation of applying a security policy to a resource. ticket In the data-fabric platform, a file that contains keys used to authenticate users and cluster servers. Tickets are created using the maprlogin or configure.sh utilities and are encrypted to protect their contents. Different types of tickets are provided for users and services. For example, every user who wants to access a cluster must have a user ticket, and every node in a cluster must have a server ticket. volume A tree of files and directories grouped for the purpose of applying a policy or set of policies to all of them at once. Warden A data-fabric process that coordinates the starting and stopping of configured services on a node. YARN resource containers A unit of memory allocated for use by YARN to process each map or reduce task. ZooKeeper A coordination service for distributed applications. It provides a shared hierarchical namespace that is organized like a standard file system. super user The user that has administrative access to the data-fabric cluster. (Topic last modified: 2020-07-13) \u00a9Copyright 2024 Hewlett Packard Enterprise Development LP - Partners | Support | Dev-Hub | Community | Training | ALA | Privacy Policy | Glossary", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_super_user.html", + "title": "super user" + }, + { + "content": "\ntagging Jump to main content Get Started Platform Administration Reference Home Glossary Definitions for commonly used terms in MapR Converged Data Platform\n environments. tagging Operation of applying a security policy to a resource. HPE Ezmeral Data Fabric 7.6 Documentation Search current doc version Glossary Definitions for commonly used terms in MapR Converged Data Platform\n environments. .snapshot A special directory in the top level of each volume that contains all the snapshots created or preserved for the volume. access control expression (ACE) A Boolean expression that defines a combination of users, groups, or roles that have access to an object stored natively such as a directory, file, or HPE Ezmeral Data Fabric Database table. access control list (ACL) A list of permissions attached to an object. An ACL specifies users or system processes that can perform specific actions on an object. access policy An ACL or policy in JSON format that describes user access. Grants accounts and IAM users permissions to perform resource operations, such as putting objects in a bucket. You associate access policies with accounts, users, buckets, and objects. administrator A user or users with special privileges to administer the cluster or cluster resources. Administrative functions can include managing hardware resources, users, data, services, security, and availability. advisory quota An advisory disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the advisory quota, an alert is sent. air gap Physical isolation between a computer system and unsecured networks. 
+        "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_tagging.html",
+        "title": "tagging"
+    },
+    {
+        "content": "\nticket In the data-fabric platform, a file that contains keys used to authenticate users and cluster servers. Tickets are created using the maprlogin or configure.sh utilities and are encrypted to protect their contents. Different types of tickets are provided for users and services. For example, every user who wants to access a cluster must have a user ticket, and every node in a cluster must have a server ticket.",
+        "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_ticket.html",
+        "title": "ticket"
+    },
+    {
+        "content": "\nvolume A tree of files and directories grouped for the purpose of applying a policy or set of policies to all of them at once.",
+        "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_volume.html",
+        "title": "volume"
+    },
+    {
+        "content": "\nWarden A data-fabric process that coordinates the starting and stopping of configured services on a node.",
Also referred to as an \"edge node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. cluster admin The data-fabric user . compute node A compute node is used to process data using a compute engine (for example, YARN, Hive, Spark, or Drill). A compute node is by definition a data-fabric cluster node. container The unit of shared storage in a data-fabric cluster. Every container is either a name container or a data container. container location database (CLDB) A service, running on one or more data-fabric nodes, that maintains the locations of services, containers, and other cluster information. core The minimum complement of software packages required to construct a data-fabric cluster. These packages include mapr-core , mapr-core-internal , mapr-cldb , mapr-apiserver , mapr-fileserver , mapr-zookeeper , and others. Note that ecosystem components are not part of core. data-access gateway A service that acts as a proxy and gateway for translating requests between lightweight client applications and the data-fabric cluster. data compaction A process that enables users to remove empty or deleted space in the database and to compact the database to occupy contiguous space. data container One of the two types of containers in a data-fabric cluster. Data containers typically have a cascaded configuration (master replicates to replica1, replica1 replicates to replica2, and so on). Every data container is either a master container, an intermediate container, or a tail container depending on its replication role. data fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. data-fabric administrator The \" data-fabric user.\" The user that cluster services run as (typically named mapr or hadoop ) on each node. data-fabric gateway A gateway that supports table and stream replication. The data-fabric gateway mediates one-way communication between a source data-fabric cluster and a destination cluster. The data-fabric gateway also applies updates from JSON tables to their secondary indexes and propagates Change Data Capture (CDC) logs. data-fabric user The user that cluster services run as (typically named mapr or hadoop ) on each node. The data-fabric user, also known as the \" data-fabric admin,\" has full privileges to administer the cluster. The administrative privilege, with varying levels of control, can be assigned to other users as well. data node A data node has the function of storing data and always runs FileServer. A data node is by definition a data-fabric cluster node. desired replication factor The number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. developer preview A label for a feature or collection of features that have usage restrictions. Developer previews are not tested for production environments, and should be used with caution. Docker containers The application containers used by Docker software. Docker is a leading proponent of OS virtualization using application containers (\"containerization\"). Domain Relates to Object Store. A domain is a management entity for accounts and users. 
The number of users, the amount of disk space, number of buckets in each of the accounts, total number of accounts, and the number of disabled accounts are all tracked within a domain. Currently, Object Store only supports the primary domain; you cannot create additional domains. Administrators can create multiple accounts in the primary domain. domain user Relates to Object Store. A domain user is a cluster security principal authenticated through AD/LDAP. Domain users only exist in the default account. Domain users can log in to the Object Store UI with their domain username and password\u200b. Ecosystem Pack (EEP) A selected set of stable, interoperable, and widely used components from the Hadoop ecosystem that are fully supported on the Data Fabric platform. edge cluster A small-footprint edition of the HPE Ezmeral Data Fabric designed to capture, process, and analyze IoT data close to the source of the data. edge node A node that runs the mapr-client that can access every cluster node and is used to access the cluster. Also referred to as a \"client node.\" Client nodes and edge nodes are NOT part of a data-fabric cluster. fabric A collection of nodes that work together under a unified architecture, along with the services or technologies running on that architecture. A fabric is similar to a Linux cluster. Fabrics help you manage your data, making it possible to access, integrate, model, analyze, and provision your data seamlessly. filelet A filelet, also called an fid, is a 256MB shard of a file. A 1 GB file for instance is comprised of the following filelets: 64K (primary fid)+(256MB-64KB)+256MB+256MB+256MB. file system The NFS-mountable, distributed, high-performance HPE Ezmeral Data Fabric data-storage system. gateway node A node on which a mapr-gateway is installed. A gateway node is by definition a data-fabric cluster node. global namespace (GNS) The data plane that connects HPE Ezmeral Data Fabric deployments. The global namespace is a mechanism that aggregates disparate and remote data sources and provides a namespace that encompasses all of your infrastructure and deployments. Global namespace technology lets you manage globally deployed data as a single resource. Because of the global namespace, you can view and run multiple fabrics as a single, logical, and local fabric. The global namespace is designed to span multiple edge nodes, on-prem data centers, and clouds. heartbeat A signal sent by each FileServer and NFS node every second to provide information to the CLDB about the node's health and resource usage. IAM users Relates to Object Store. An IAM (Identity and Access Management) user represents an actual user or an application. An administrator creates IAM users in an Object Store account and assigns access policies to them to control user and application access to resources in the account. Installer A program that simplifies installation of the HPE Ezmeral Data Fabric. The Installer guides you through the process of installing a cluster with data-fabric services and ecosystem components. You can also use the Installer to update a previously installed cluster with additional nodes, services, and ecosystem components. And you can use the Installer to upgrade a cluster to a newer core version if the cluster was installed using the Installer or an Installer Stanza. log compaction A process that purges messages previously published to a topic partition, retaining the latest version. 
MAST Gateway A gateway that serves as a centralized entry point for all the operations that need to be performed on tiered storage. minimum replication factor The minimum number of copies of a volume that should be maintained by the data-fabric cluster for normal operation. When the replication factor falls below this minimum, re-replication occurs as aggressively as possible to restore the replication level. If any containers in the CLDB volume fall below the minimum replication factor, writes are disabled until aggressive re-replication restores the minimum level of replication. mirror A replica of a volume. MOSS MOSS is the acronym for Multithreaded Object Store Server. name container A container in a data-fabric cluster that holds a volume's namespace information and file chunk locations, and the first 64 KB of each file in the volume. Network File System (NFS) A protocol that allows a user on a client computer to access files over a network as though they were stored locally. node An individual server (physical or virtual machine) in a cluster. NodeManager (NM) A data service that works with the ResourceManager to host the YARN resource containers that run on each data node. object File and metadata that describes the file. You upload an object into a bucket. You can then download, open, move, or delete the object. Object Store Object and metadata storage solution built into the HPE Ezmeral Data Fabric. Object Store efficiently stores data for fast access and leverages the capabilities of the patented HPE Ezmeral Data Fabric file system for performance, reliability, and scalability. policy server The service that manages security policies and composite IDs. quota A disk capacity limit that can be set for a volume, user, or group. When disk usage exceeds the quota, no more data can be written. replication factor The number of copies of a volume. replication role The replication role of a container determines how that container is replicated to other storage pools in the cluster. replication role balancer The replication role balancer is a tool that switches the replication roles of containers to ensure that every node has an equal share of master and replica containers (for name containers) and an equal share of master, intermediate, and tail containers (for data containers). re-replication Re-replication occurs whenever the number of available replica containers drops below the number prescribed by that volume's replication factor. Re-replication may occur for a variety of reasons including replica container corruption, node unavailability, hard disk failure, or an increase in replication factor. ResourceManager (RM) A YARN service that manages cluster resources and schedules applications. role The service that the node runs in a cluster. You can use a node for one, or a combination of the following roles: CLDB, JobTracker, WebServer, ResourceManager, Zookeeper, FileServer, TaskTracker, NFS, and HBase. secret A Kubernetes object that holds sensitive information, such as passwords, tokens, and keys. Pods that require this sensitive information reference the secret in their pod definition. Secrets are the method Kubernetes uses to move sensitive data into pods. secure by default The HPE Ezmeral Data Fabric platform and supported ecosystem components are designed to implement security unless the user takes specific steps to turn off security options. schedule A group of rules that specify recurring points in time at which certain actions are determined to occur.
snapshot A read-only logical image of a volume at a specific point in time. storage pool A unit of storage made up of one or more disks. By default, data-fabric storage pools contain two or three disks. For high-volume reads and writes, you can create larger storage pools when initially formatting storage during cluster creation. stripe width The number of disks in a storage pool. super group The group that has administrative access to the data-fabric cluster. super user The user that has administrative access to the data-fabric cluster. tagging Operation of applying a security policy to a resource. ticket In the data-fabric platform, a file that contains keys used to authenticate users and cluster servers. Tickets are created using the maprlogin or configure.sh utilities and are encrypted to protect their contents. Different types of tickets are provided for users and services. For example, every user who wants to access a cluster must have a user ticket, and every node in a cluster must have a server ticket. volume A tree of files and directories grouped for the purpose of applying a policy or set of policies to all of them at once. Warden A data-fabric process that coordinates the starting and stopping of configured services on a node. YARN resource containers A unit of memory allocated for use by YARN to process each map or reduce task. ZooKeeper A coordination service for distributed applications. It provides a shared hierarchical namespace that is organized like a standard file system.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_warden.html", + "title": "Warden" + }, + { + "content": "\nYARN resource containers Definitions for commonly used terms in MapR Converged Data Platform environments. YARN resource containers A unit of memory allocated for use by YARN to process each map or reduce task.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_yarn_resource_containers.html", + "title": "YARN resource containers" + }, + { + "content": "\nZooKeeper Definitions for commonly used terms in MapR Converged Data Platform environments. ZooKeeper A coordination service for distributed applications. It provides a shared hierarchical namespace that is organized like a standard file system.
", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/glossary/gloss_zookeeper.html", + "title": "ZooKeeper" + }, + { + "content": "\nLanding Page Nav Version 2 This file is a resource file used for creating a persona-based overflow for common tasks. HPE Ezmeral Data Fabric is the as-a-service solution for the hybrid enterprise with data distributed from edge to core to cloud. The federated global namespace integrates files, objects, and streaming data and offers consumption-based pricing. Far-flung deployments run in a single, logical view no matter where the data is located. Get Started Describes what's new in the current release, tells you how to install the platform, documents known issues, and explains how to obtain user assistance. Platform Describes concepts that are useful in understanding the Data Fabric. Administration Describes how to use the HPE Ezmeral Data Fabric. Tasks include creating fabrics and volumes, working with users, and managing alarms. Reference Contains licensing information and pointers to other resources.", + "url": "https://docs.ezmeral.hpe.com/datafabric/76/landing_page_v2.html", + "title": "Landing Page Nav Version 2" + } +] \ No newline at end of file diff --git a/demos/rag-demos/question-answering-gpu/documents/EzUA.json b/demos/rag-demos/question-answering-gpu/documents/EzUA.json new file mode 100644 index 00000000..2a47a03c --- /dev/null +++ b/demos/rag-demos/question-answering-gpu/documents/EzUA.json @@ -0,0 +1,812 @@ +[ + { + "content": "\nGet Started Describes how to get started with HPE Ezmeral Unified Analytics Software. About Provides an overview of HPE Ezmeral Unified Analytics Software. Tutorials Provides a set of tutorials that you can use to experience HPE Ezmeral Unified Analytics Software and the included applications, such as tutorials for data science and data analytics workflows with notebooks and applications like Spark, MLflow, Feast, Airflow, and EzPresto. Resources Provides links to additional resources such as product licensing information, on-demand training, videos, blogs, and the HPE Ezmeral Unified Analytics Software community. Administration Provides information about managing applications and clusters in HPE Ezmeral Unified Analytics Software. Observability Describes observability in HPE Ezmeral Unified Analytics Software. Data Engineering Data engineers can design and build pipelines that transform and transport data into usable formats for data consumers. Data Analytics Provides a brief overview of data analytics in HPE Ezmeral Unified Analytics Software. Data Science Provides a brief overview of data science in HPE Ezmeral Unified Analytics Software. Notebooks Provides a brief overview of Notebooks in HPE Ezmeral Unified Analytics Software. Glossary Definitions for commonly used terms in HPE Ezmeral Unified Analytics environments.
Get Started Describes how to get started with HPE Ezmeral Unified Analytics Software. The following sections provide links to topics for administrators and members to get started with HPE Ezmeral Unified Analytics Software. Administrators Administrators may be interested in the following topics: Installation AD/LDAP Servers Identity and Access Management Adding and Removing Users Importing Applications and Managing the Application Lifecycle Connecting to External HPE Ezmeral Data Fabric Clusters Configuring Endpoints Members Members (non-administrative users) may be interested in the following topics: Tutorials Data Engineering Data Analytics Data Science Notebooks", + "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/GetStarted/get-started.html", + "title": "Get Started" + }, + { + "content": "\nAbout Provides an overview of HPE Ezmeral Unified Analytics Software. HPE Ezmeral Unified Analytics Software is usage-based Software-as-a-Service (SaaS) that fully manages, supports, and maintains hybrid and multi-cloud modern analytical workloads through open-source tools. HPE Ezmeral Unified Analytics Software separates compute and storage for flexible, cost-efficient scalability to securely access data stored in multiple data platforms through a simple user interface, which is easily installed and deployed on private, public, and on-premises infrastructure.
Features and Functionality HPE Ezmeral Unified Analytics Software provides the following features and functionality in a single UX: Access data anywhere and manage it in one place Connect bidirectionally to multiple data platforms and join data to create a federated data mesh that you manage in one place. Includes authentication, authorization, logging, metrics collection, and monitoring. Robust, integrated storage layer Includes an integrated, scalable data fabric storage layer with data-mesh-like capabilities as the ephemeral storage for all types of data, including structured and unstructured data, files, objects, and streams. Analytical workloads Support for the most common enterprise analytics use cases, ranging from traditional BI/reporting (via PrestoDB and SparkSQL interfaces) to emerging workloads such as exploratory data science, real-time analytics, and machine learning workflows. Self-service data access All users, including administrators, data engineers, data analysts, and data scientists, can directly access data from HPE Ezmeral Unified Analytics Software. Built-in access to BI dashboards and data science tools Includes built-in BI dashboards for analytics and operational reporting. Also includes web-based notebook interfaces, such as Jupyter Lab and Visual Studio, for data science workflows (model training and serving frameworks). Built-in SSO Supports a single sign-on experience; users sign in to access HPE Ezmeral Unified Analytics Software, and compute components integrate with the storage platform infrastructure to pass the identity of each user. Performance Distributed, in-memory caching (explicit) that accelerates federated queries on commonly used datasets. Compute Components The compute components included in HPE Ezmeral Unified Analytics Software enable users to get up and running in minutes. Components connect to each other at start-up and use pre-defined storage areas in the built-in data fabric. When applicable, compute components can automatically take advantage of GPUs. The following list describes the compute components included in HPE Ezmeral Unified Analytics Software: Spark Spark is a primary engine for data analytics tasks. EzPresto EzPresto is a distributed SQL query engine with a built-in query federation capability (distributed in-memory caching and pushdown optimizations) for fast analytic queries on data of any size. Kubeflow Kubeflow is an ML framework focused on model training that includes Notebooks, Pipelines (Airflow), Experiments, Kserve, and various distributed training operators. Airflow Airflow for data engineering and task automation. Notebooks Jupyter notebooks for performing varied data science tasks, such as cleaning data, labeling features, testing toy models, and launching distributed training models. Dashboard Frameworks Dashboard frameworks for building data models and visualizations. Workflows and Pipelines HPE Ezmeral Unified Analytics Software provides simplified workflows and pipelines for data engineers, data analysts, and data scientists to solve complex problems.
An image in the source documentation shows some of the supported workflows and pipelines.", + "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/About/overview_and_features.html", + "title": "About" + }, + { + "content": "\nTutorials
Provides a set of tutorials that you can use to experience HPE Ezmeral Unified Analytics Software and the included applications, such as tutorials for data science and data analytics workflows with notebooks and applications like Spark, MLflow, Feast, Airflow, and EzPresto. The following sections describe the tutorials and provide links to access the complete tutorials in GitHub. Fraud Detection Use Case Overview In this tutorial, data scientists and machine learning engineers inspect fraudulent transactions using the Bankism dataset. This synthetically created dataset is a combination of various customer payments, made at different intervals and in varying amounts. This tutorial covers everything from data processing, through model development, to the final stage of model deployment. By the end of this tutorial, you will learn how to detect and curtail fraudulent activities with high accuracy. Tools This tutorial uses the following components from HPE Ezmeral Unified Analytics Software: Kale to enable the transformation of a Jupyter Notebook into a Kubeflow Pipeline. Kubeflow Pipelines to scale the training and deployment process in a reproducible way. MinIO to store the training artifacts. KServe to serve the fully trained machine learning model. GitHub Link To complete this tutorial, follow the instructions outlined in the fraud detection tutorial. Question-Answering Use Case Overview In this tutorial, data engineers explore the deployment of LLMs (Large Language Models): they write the code that serves the model, process the user requests, and package everything in a custom Docker image. This tutorial utilizes an open-source Large Language Model (LLM) that can answer questions over a corpus of private documentation. To achieve this, the tutorial employs a Vector Store that captures and indexes a latent representation for each document. This allows the application to retrieve the relevant context based on the user's questions, enabling accurate and efficient question answering. Tools This tutorial uses the following components from HPE Ezmeral Unified Analytics Software: KServe to serve the fully trained machine learning model. GitHub Link To complete this tutorial, follow the instructions outlined in the question-answering tutorial. Wind Turbine Use Case Overview In this tutorial, data scientists and data engineers use Spark to explore the training dataset and train a Gradient-Boosted Tree (GBT) regressor. The GBT regressor estimates the power output of a wind turbine by utilizing various features, such as wind speed and direction. Tools This tutorial uses the following components from HPE Ezmeral Unified Analytics Software: Apache Spark to process large-scale data and train machine learning models in a distributed manner. Apache Livy to enable easy interaction with a Spark cluster over a REST interface. GitHub Link To complete this tutorial, follow the instructions outlined in the wind turbine tutorial. Preparing the Tutorial Environment Describes how to prepare the environment for the Financial Time Series and MNIST Digits Recognition tutorials. Data Source Connectivity and Exploration Provides basic steps for using the Data Engineering space within HPE Ezmeral Unified Analytics Software.
BI Reporting (Superset) Basics Provides basic steps for using the BI Reporting (Superset) space within HPE Ezmeral Unified Analytics Software. Candy Sharing Tutorial (Kale) Describes how Kale converts a Notebook to a pipeline by applying notebook annotations. Feast Ride Sharing Use Case Provides an end-to-end workflow using Feast in HPE Ezmeral Unified Analytics Software to generate training data and perform online model inference for the ride-sharing driver satisfaction model. Financial Time Series Workflow Describes how to use HPE Ezmeral Unified Analytics Software to run a Spark application from an Airflow DAG and then run a Jupyter notebook to analyze and visualize data that the Spark application puts into a shared directory in the shared volume that the data scientist\u2019s notebook is mounted to. MLflow Bike Sharing Use Case Provides an end-to-end workflow in HPE Ezmeral Unified Analytics Software for an MLflow prediction model to determine bike rentals per hour based on weather and time. MNIST Digits Recognition Workflow Provides an end-to-end workflow in HPE Ezmeral Unified Analytics Software for an MNIST digits recognition example. Retail Store Analysis Dashboard (Superset) Provides an end-to-end workflow example for a retail store analysis scenario in HPE Ezmeral Unified Analytics Software using EzPresto and Superset. Submitting a Spark Wordcount Application Provides an end-to-end example for creating and submitting a wordcount Spark Application in HPE Ezmeral Unified Analytics Software.", + "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Tutorials/Tutorials/Tutorials.html", + "title": "Tutorials" + },
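The question-answering tutorial described above retrieves context from a vector store by indexing a latent representation (embedding) of each document. As a rough illustration of that retrieval step only (not the tutorial's actual code; the model name, sample documents, and query below are hypothetical placeholders), a minimal sketch in Python with sentence-transformers:

```python
# Minimal sketch of embedding-based retrieval for question answering.
# Assumes `pip install sentence-transformers numpy`; the model name,
# documents, and question are illustrative, not the demo's real corpus.
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")

documents = [
    "EzPresto is a distributed SQL query engine with query federation.",
    "Kubeflow Pipelines scale model training in a reproducible way.",
    "KServe serves fully trained machine learning models.",
]

# "Index" step: one embedding (latent representation) per document.
doc_embeddings = model.encode(documents, normalize_embeddings=True)

def retrieve(question: str, k: int = 2) -> list[str]:
    """Return the k documents most similar to the question."""
    q = model.encode([question], normalize_embeddings=True)[0]
    scores = doc_embeddings @ q  # cosine similarity (vectors are normalized)
    return [documents[i] for i in np.argsort(scores)[::-1][:k]]

print(retrieve("Which component serves trained models?"))
```

In the demo itself the embeddings live in a dedicated vector store service and the top-scoring passages are prepended to the LLM prompt; the sketch shows only the similarity-search idea.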
+ { + "content": "\nPreparing the Tutorial Environment Describes how to prepare the environment for the Financial Time Series and MNIST Digits Recognition tutorials. Prerequisites: Sign in as an administrator to prepare the environment for the Financial Time Series and MNIST Digits Recognition tutorials. Create an S3 Object Store Bucket and Load Data The Spark application reads raw data from the S3 Object Store. Use MinIO to create an S3 bucket named ezaf-demo and put the following files in the ezaf-demo bucket, as described: Create a data/mnist directory in the ezaf-demo bucket, and upload the following dataset to the mnist folder: https://github.com/HPEEzmeral/ezua-tutorials/tree/main/Data-Science/Kubeflow/MNIST-Digits-Recognition/dataset Create a data folder in the ezaf-demo bucket, and upload the following dataset to the data folder: https://github.com/HPEEzmeral/ezua-tutorials/tree/main/Data-Science/Kubeflow/Financial-Time-Series/dataset", + "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Tutorials/Tutorials/pepare-tutorial-env.html", + "title": "Preparing the Tutorial Environment" + },
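For readers who prefer to script the ezaf-demo bucket setup described above rather than use the MinIO UI, a minimal sketch with the MinIO Python SDK follows. The endpoint, credentials, and local file names are placeholders; only the bucket name and object prefixes come from the tutorial:

```python
# Minimal sketch: create the ezaf-demo bucket and upload the tutorial
# datasets with the MinIO Python SDK (pip install minio). The endpoint,
# credentials, and local file paths below are hypothetical placeholders.
from minio import Minio

client = Minio(
    "minio.example.com:9000",      # placeholder endpoint
    access_key="YOUR_ACCESS_KEY",  # placeholder credentials
    secret_key="YOUR_SECRET_KEY",
    secure=True,
)

if not client.bucket_exists("ezaf-demo"):
    client.make_bucket("ezaf-demo")

# Upload the MNIST dataset under data/mnist/ and the financial time
# series dataset under data/, matching the prefixes in the tutorial.
client.fput_object("ezaf-demo", "data/mnist/mnist.npz", "dataset/mnist.npz")
client.fput_object("ezaf-demo", "data/financial.csv", "dataset/financial.csv")
```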
",
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Tutorials/Tutorials/pepare-tutorial-env.html",
"title": "Preparing the Tutorial Environment"
},
{
"content": "
Data Source Connectivity and Exploration
Provides basic steps for using the Data Engineering space within HPE Ezmeral Unified Analytics Software.
You can connect to data sources and work with data within the Data Engineering space of HPE Ezmeral Unified Analytics Software. The Data Engineering space includes:
- Data Sources – View and access connected data sources; create new data source connections.
- Data Catalog – Select data sets (tables and views) from one or more data sources and query data across the data sets. You can cache data sets. Caching stores the data in a distributed caching layer within the data fabric for accelerated access to the data.
- Query Editor – Run queries against selected data sets; create views and new schemas.
- Cached Assets – Lists the cached data sets (tables and views).
- Airflow Pipelines – Links to the Airflow interface where you can connect to data sets created in HPE Ezmeral Unified Analytics Software and use them in your data pipelines.
Tutorial Objective
Although you can perform more complex tasks in HPE Ezmeral Unified Analytics Software, the purpose of this tutorial is to walk you through some Data Engineering basics and familiarize you with the interface, including how to:
- Connect data sources
- Select predefined data sets in data sources
- Join data across data sets/data sources
- Create a view
- Run a query against the view
This tutorial takes approximately 10 minutes to complete. You may want to print the following instructions or open the instructions on a different monitor to avoid switching between HPE Ezmeral Unified Analytics Software and the tutorial on one monitor.
IMPORTANT: This tutorial demonstrates how to perform a series of tasks in HPE Ezmeral Unified Analytics Software to complete an example workflow. The data and information used in this tutorial are for example purposes only. You must connect Unified Analytics to your own data sources and use the data sets available to you in your data sources.
A – Sign in to HPE Ezmeral Unified Analytics Software
Sign in to HPE Ezmeral Unified Analytics Software with the URL provided by your administrator.
B – Connect Data Sources
Connect HPE Ezmeral Unified Analytics Software to external data sources that contain the data sets (tables and views) you want to work with. This tutorial uses MySQL and Snowflake as the connected data sources. To connect a data source:
1. In the left navigation column, select Data Engineering > Data Sources. The Data Sources screen appears.
2. Click Add New Data Source.
3. Complete the steps required to connect to the MySQL, Snowflake, and Hive data sources:
Connecting to MySQL
1. In the Add New Data Source screen, click Create Connection in the MySQL tile.
2. In the drawer that opens, enter the required information in the respective fields (the information used here is for example purposes only):
   - Name: mysql
   - Connection URL: jdbc:mysql://:
   - Connection User: demouser
   - Connection Password: moi123
   - Enable Local Snapshot Table: Select the check box
   TIP: When Enable Local Snapshot Table is selected, the system caches remote table data to accelerate queries on the tables. The cache is active for the duration of the configured TTL or until the remote tables in the data source are altered.
3. Click Connect. Upon successful connection, the system returns the following message: Successfully added data source "mysql".
Connecting to Snowflake
1. In the Add New Data Source screen, click Create Connection in the Snowflake tile.
2. In the drawer that opens, enter the following information in the respective fields:
   - Name: snowflake_ret
   - Connection URL: jdbc:snowflake://mydomain.com/
   - Connection User: demouser
   - Connection Password: moi123
   - Snowflake DB: my_snowflake_db
   - Enable Local Snapshot Table: Select the check box
3. Click Connect. Upon successful connection, the system returns the following message: Successfully added data source "snowflake_ret".
Connecting to Hive
1. In the Add New Data Source screen, click Create Connection in the Hive tile.
2. In the drawer that opens, enter the following information in the respective fields:
   - Name: hiveview
   - Hive Metastore: file
   - Hive Metastore Catalog Dir: file:///data/shared/tmpmetastore
3. In Optional Fields, search for the following fields and add the specified values:
   - Hive Max Partitions Per Writers: 10000
   - Hive Temporary Staging Directory Enabled: Unselect
   - Hive Allow Drop Table: Select
   - Enable Local Snapshot Table: Select the check box
4. Click Connect. Upon successful connection, the system returns the following message: Successfully added data source "hiveview".
C – Select Data Sets in the Data Catalog
In the Data Catalog, select the data sets (tables and views) in each of the data sources that you want to work with. This tutorial uses the customer tables in the connected mysql and snowflake_ret data sources. In the mysql data source, the schema for the customer table is retailstore. In the snowflake_ret data source, the schema for the customer table is public. To select the data sets that you want to work with:
1. In the left navigation bar, select Data Engineering > Data Catalog.
2. On the Data Catalog page, click the dropdown next to the mysql and snowflake_ret data sources to expose the available schemas in those data sources.
3. For the snowflake_ret data source, select the public schema; for the mysql data source, select the retailstore schema.
4. In the All Datasets search field, enter a search term to limit the number of data sets. This tutorial searches on data sets with the name customer. All the data sets that have customer in the name with the public or retailstore schema display.
5. Click a customer table and preview its data in the Columns and Data Preview tabs. NOTE: Do not click the browser's back button; doing so takes you to the Data Sources screen and you will have to repeat the previous steps.
6. Click Close to return to the data sets.
7. Click Select by each of the tables named customer. Selected Datasets should show 2 as the number of data sets selected.
8. Click Selected Datasets. The Selected Datasets drawer opens, giving you another opportunity to preview the datasets or discard them. From here, you can either query or cache the selected data sets. For the purpose of this tutorial, we will query the data sets.
9. Click Query Editor.
D – Run a JOIN Query on Data Sets and Create a View
The data sets you selected display under Selected Datasets in the Query Editor. Run a JOIN query to join data from the two customer tables and then create a view from the query. The system saves views as cached assets that you can reuse. To view table columns and run a JOIN query:
1. Expand the customer tables in the Selected Datasets section to view the columns in each of the tables.
2. In the SQL Query workspace, click + to add a worksheet.
3. Copy and paste the following query into the SQL Query field. This query creates a new schema in the hiveview data source named demoschema:
   create schema if not exists hiveview.demoschema;
4. Click Run to run the query.
As the query runs, a green light pulsates next to the Query ID in the Query Results section to indicate that the query is in progress. When the query is completed, the Status column displays Succeeded.
5. In the SQL Query workspace, click + to add a worksheet.
6. Copy and paste the following query into the SQL Query field. This query creates a view (hiveview.demoschema.customer_info_view) from a query that joins columns from the two customer tables (in the mysql and snowflake_ret data sources) on the customer ID:
   create view hiveview.demoschema.customer_info_view as
   SELECT t1.c_customer_id, t1.c_first_name, t1.c_last_name, t2.c_email_address
   FROM mysql.retailstore.customer t1
   INNER JOIN snowflake_ret.public.customer t2
   ON t1.c_customer_id = t2.c_customer_id
7. Click Run to run the query.
8. In the SQL Query workspace, click + to add a worksheet.
9. Copy and paste the following query into the SQL Query field. This query runs against the view you created and returns all data in the view:
   SELECT * FROM hiveview.demoschema.customer_info_view;
10. Click Run to run the query.
11. In the Query Results section, expand the Actions option for the query and select Query Details to view the query session and resource utilization summary.
12. Click Close to exit out of Query Details.
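The same federated view can also be queried programmatically, outside the Query Editor. A minimal sketch using the presto-python-client package; the host and port are hypothetical, so obtain the real connection coordinates from your administrator:

    import prestodb  # pip install presto-python-client

    # Hypothetical EzPresto coordinates -- substitute your cluster's values.
    conn = prestodb.dbapi.connect(
        host="ezpresto.example.com",
        port=8080,
        user="demouser",
        catalog="hiveview",
        schema="demoschema",
    )
    cur = conn.cursor()
    cur.execute("SELECT * FROM hiveview.demoschema.customer_info_view LIMIT 10")
    for row in cur.fetchall():
        print(row)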
End of Tutorial
You have completed this tutorial. This tutorial demonstrated how easy it is to connect HPE Ezmeral Unified Analytics Software to various data sources for federated access to data through a single interface using standard SQL queries. You may also be interested in the BI Reporting (Superset) Basics tutorial, which shows you how to create a Superset dashboard using the view (customer_info_view) and schema (demoschema) created in this tutorial.
",
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Tutorials/Tutorials/data-engineer-basics-tutorial.html",
"title": "Data Source Connectivity and Exploration"
},
{
"content": "
BI Reporting (Superset) Basics
Provides basic steps for using the BI Reporting (Superset) space within HPE Ezmeral Unified Analytics Software.
You can add data sets that you created in HPE Ezmeral Unified Analytics Software to Superset and visualize the data in dashboards. You can access dashboards (Superset) from the BI Reporting space within HPE Ezmeral Unified Analytics Software.
Tutorial Objective
The purpose of this tutorial is to walk you through some Superset basics to familiarize you with the interface and how to use it with the data sets you create in HPE Ezmeral Unified Analytics, including how to:
- Add datasets created in HPE Ezmeral Unified Analytics Software to Superset
- Visualize the data set in a chart
- Create a dashboard
- Add the chart to the dashboard
This tutorial takes approximately 10 minutes to complete. You may want to print the following instructions or open the instructions on a different monitor to avoid switching between HPE Ezmeral Unified Analytics Software and the tutorial on one monitor.
IMPORTANT: This tutorial demonstrates how to perform a series of tasks in HPE Ezmeral Unified Analytics Software to complete an example workflow. The data and information used in this tutorial are for example purposes only.
You must connect Unified Analytics to your own data sources and use the data sets available to you in your data sources.
Prerequisite
This tutorial builds on Data Source Connectivity and Exploration. In that tutorial, you created a view (customer_info_view) and a schema from a query that joined customer tables from two different data sources (MySQL and Snowflake). In this tutorial, you import the view and schema into Superset, visualize the data in a chart, and add the chart to a dashboard.
A – Sign in to HPE Ezmeral Unified Analytics Software
Sign in to HPE Ezmeral Unified Analytics Software with the URL provided by your administrator.
B – Connect to the Presto Database
Complete the following steps to connect Superset to the Presto database for access to your data sources and data sets in HPE Ezmeral Unified Analytics Software. Once connected to the Presto database, you can access your data sets in HPE Ezmeral Unified Analytics Software from Superset. To connect to the Presto database, you need the connection URI. You can get the URI from your HPE Ezmeral Unified Analytics Software administrator.
1. To open Superset, in the left navigation pane of HPE Ezmeral Unified Analytics Software, select BI Reporting > Dashboards. Superset opens in a new tab.
2. In Superset, select Settings > Database Connections.
3. Click +DATABASE.
4. In the Connect a database window, select the Presto tile.
5. Enter the SQLALCHEMY URI provided by your administrator. (A sketch for checking the URI from a notebook appears after section D.)
6. Test the connection. If the test was successful, click Connect.
C – Add a Data Set to a Chart
To add a dataset to a chart:
1. Select the Datasets tab.
2. Click + DATASET.
3. In the Add dataset window, make the following selections in the fields:
   - DATABASE: Presto
   - SCHEMA:
   - SEE TABLE SCHEMA:
4. Click ADD DATASET AND CREATE CHART.
5. In the Choose chart type column, select #Popular and choose Table.
6. Click CREATE NEW CHART.
7. In the chart screen, enter a name for the chart. For example, name the chart Customer Info.
8. Select RAW RECORDS as the QUERY MODE.
9. Drag and drop the following four columns into the COLUMNS field: c_customer_id, c_first_name, c_last_name, c_email_address.
10. Click into the Filters field and select or enter the following information in the window that opens: c_first_name, Equal to (=), Charles.
11. Click SAVE.
12. Click CREATE CHART. The query runs and results that meet the query conditions display. The chart displays four columns of data for customers with the first name Charles.
13. Click SAVE to save the chart. A window opens. Click SAVE in the window. Do not add to a dashboard. Superset saves the chart.
D – Create a Dashboard and Add the Chart
To create a dashboard and add the chart you created to the dashboard:
1. In Superset, click the Dashboards tab.
2. Click +DASHBOARD.
3. Enter a name (title) for the dashboard, for example Customer Dashboard.
4. In the right navigation bar, click the LAYOUT ELEMENTS tab.
5. Drag and drop the Header element into the dashboard.
6. In the Header element, enter a title, for example Customers Named Charles.
7. In the right navigation bar, click the CHARTS tab.
8. Locate the chart you created (Customer Info) and drag and drop the chart into the dashboard. You may need to drag the chart over the Header title and drop it there to get it to stay in place. A blue line appears in the dashboard when the chart is in a place it can be dropped.
9. Click SAVE to save the dashboard.
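As referenced in section B, it can be handy to verify the SQLALCHEMY URI from a notebook before entering it in Superset. A minimal sketch using SQLAlchemy with the PyHive Presto dialect; the URI below only illustrates the usual presto://user@host:port/catalog/schema shape and is not the URI for your cluster:

    from sqlalchemy import create_engine, text  # pip install sqlalchemy "pyhive[presto]"

    # Hypothetical URI -- use the one provided by your administrator.
    engine = create_engine("presto://demouser@ezpresto.example.com:8080/hiveview/demoschema")

    with engine.connect() as conn:
        print(conn.execute(text("SELECT * FROM customer_info_view LIMIT 5")).fetchall())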
End of Tutorial
You have completed this tutorial. This tutorial demonstrated the integration of the HPE Ezmeral Unified Analytics Software SQL query engine (EzPresto) with Superset to visualize data models that you create in the Data Engineering space using the charting and dashboarding features in Superset. You may also be interested in the Retail Store Analysis Dashboard (Superset) tutorial, which shows you how to create a database connection, visualize data, and monitor queries used in visualizations.
",
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Tutorials/Tutorials/ua-superset-basics-tutorial.html",
"title": "BI Reporting (Superset) Basics"
},
{
"content": "
Candy Sharing Tutorial (Kale)
Describes how Kale converts a notebook to a pipeline by applying notebook annotations.
1. Log in to the Kubeflow notebook.
2. If you do not already have the file, upload candies_sharing.ipynb using the Upload Files button inside the Kubeflow notebook.
3. Open the candies_sharing.ipynb file and enable the Kale extension.
4. Run all cells in the notebook using Run > Run All Cells.
5. At the bottom of the Kale Deployment Panel, click COMPILE AND RUN.
6. Open the Kubeflow Dashboard from the Runs page and check the status of the pipeline run launched from the Kubeflow notebook.
More information: https://github.com/kubeflow-kale/kale
",
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Tutorials/Tutorials/candy-sharing-tutorial.html",
"title": "Candy Sharing Tutorial (Kale)"
},
{
"content": "
Feast Ride Sharing Use Case
Provides an end-to-end workflow using Feast in HPE Ezmeral Unified Analytics Software to generate training data and perform online model inference for the ride-sharing driver satisfaction model.
Prerequisites
Sign in to HPE Ezmeral Unified Analytics Software.
About this task
Use Feast to generate training data and perform online model inference for the ride-sharing driver satisfaction model. In this tutorial, you will:
- Deploy a local feature store with a Parquet file offline store and SQLite online store.
- Build a training dataset using time series features from Parquet files.
- Read the latest features from the offline store for batch scoring.
- Ingest batch features ("materialization") and streaming features into the online store.
- Read the latest features from the online store for real-time inference.
- Explore the Feast web interface to see the Data Sources, Entities, Feature Views, Feature Services, and Datasets which are defined through feature definitions.
Procedure
1. Connect to the notebook server. See Creating and Managing Notebook Servers.
2. Copy the Feast folder from the /shared directory into your user directory. NOTE: If the Feast folder is not available in the /shared directory:
   - Go to the GitHub repository for tutorials.
   - Clone the repository.
   - Navigate to ezua-tutorials/Data-Science.
   - Navigate back to the /shared directory.
   - Copy the /Feast folder from the ezua-tutorials/Data-Science repository into the /shared directory.
   - Copy the /Feast folder from the /shared folder into your user directory.
3. Validate that the ride-sharing-example.ipynb file, the definitions.py file, and the data folder are available in the //Feast directory.
4. Validate that the driver_stats.parquet file is available in the //Feast directory.
5. Open the definitions.py file and update the path for the driver_stats.parquet file for your username.
For example: /home///feast/data/driver_stats.parquet
6. Open the ride-sharing-example.ipynb file and update the path for the driver_stats.parquet file for your username. For example: /home///feast/data/driver_stats.parquet
7. Select the first cell of the notebook and click Run the selected cells and advance (play icon).
Results
1. Click the Tools & Frameworks icon on the left navigation bar.
2. Navigate to the Feast tile under the Data Science tab and click Open.
3. Explore the Feast web interface to see the Data Sources, Entities, Feature Views, Feature Services, and Datasets that are defined through feature definitions.
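For reference, the notebook's workflow boils down to a handful of Feast API calls. A minimal sketch, assuming the feature repository in the Feast folder defines a driver_hourly_stats feature view keyed on driver_id (check definitions.py for the names your repository actually uses):

    from datetime import datetime
    import pandas as pd
    from feast import FeatureStore

    # Point at the directory containing feature_store.yaml / definitions.py.
    store = FeatureStore(repo_path=".")

    # Build training data from the offline (Parquet) store.
    entity_df = pd.DataFrame(
        {"driver_id": [1001, 1002], "event_timestamp": [datetime.utcnow()] * 2}
    )
    training_df = store.get_historical_features(
        entity_df=entity_df,
        features=["driver_hourly_stats:conv_rate", "driver_hourly_stats:acc_rate"],
    ).to_df()

    # Materialize features into the SQLite online store, then read them back
    # as a model would at inference time.
    store.materialize_incremental(end_date=datetime.utcnow())
    online = store.get_online_features(
        features=["driver_hourly_stats:conv_rate"],
        entity_rows=[{"driver_id": 1001}],
    ).to_dict()
    print(online)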
",
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Tutorials/Tutorials/feast-ride-sharing-usecase.html",
"title": "Feast Ride Sharing Use Case"
},
{
"content": "
Financial Time Series Workflow
Describes how to use HPE Ezmeral Unified Analytics Software to run a Spark application from an Airflow DAG and then run a Jupyter notebook to analyze and visualize data that the Spark application puts into a shared directory in the shared volume that the data scientist’s notebook is mounted to.
Scenario
NOTE: An administrator must prepare the environment for this tutorial to work. See Preparing the Tutorial Environment.
A DAG source (located in GitHub) is coded to submit a Spark job that pulls CSV data (financial.csv) from an S3 data source, transforms the data into Parquet format, and puts the data in a shared volume in the financial-processed folder.
[Figure: the components and applications in the workflow]
Steps
Sign in to HPE Ezmeral Unified Analytics Software and perform the following steps:
- Prerequisites
- Use Airflow to run a DAG that submits a Spark application.
- View the Spark application that the DAG submitted.
- Connect to and run the Jupyter notebook to analyze and visualize the data.
Prerequisites
Connect to your Jupyter notebook and perform setup tasks to prepare the environment to train the model. A folder with a sample notebook file and SSL certificate is provided for the purpose of this tutorial. To connect your notebook and perform setup tasks:
1. In HPE Ezmeral Unified Analytics Software, go to Applications & Frameworks.
2. Select the Data Science tab and then click Open in the Kubeflow tile.
3. In Kubeflow, click Notebooks to open the notebooks page.
4. Click Connect to connect to your notebook server.
5. Go to your user folder.
6. Copy the template object_store_secret.yaml.tpl file from the shared/ezua-tutorials/Data-Analytics/Spark directory to your user folder.
7. In the /Financial-Time-Series folder, open the financial_time_series_example.ipynb file. NOTE: If you do not see the Financial-Time-Series folder in your user folder, copy the folder from the /shared/ezua-tutorials/Data-Science/Kubeflow directory into your user folder. The /shared directory is accessible to all users; editing or running examples from the /shared directory is not advised. Your user directory is specific to you and cannot be accessed by other users. If the Financial-Time-Series folder is not available in the /shared/ezua-tutorials/Data-Science/Kubeflow directory:
   - Go to the GitHub repository for tutorials.
   - Clone the repository.
   - Navigate to ezua-tutorials/Data-Science/Kubeflow.
   - Navigate back to your user directory.
   - Copy the Financial-Time-Series folder from the ezua-tutorials/Data-Science/Kubeflow directory into your user directory.
8. To generate a secret that lets the Spark application (Airflow DAG) read data source files from the S3 bucket, run the first cell of the financial_time_series_example.ipynb file:

    # Render the object-store secret template with the notebook's auth token.
    import kfp
    kfp_client = kfp.Client()
    namespace = kfp_client.get_user_namespace()
    !sed -e "s/\$AUTH_TOKEN/$AUTH_TOKEN/" /mnt/user/object_store_secret.yaml.tpl > object_store_secret.yaml

A – Run a DAG in Airflow
In Airflow, run the DAG named spark_read_csv_write_parquet_fts. The DAG runs a Spark application that reads CSV data (financial.csv) from an S3 bucket, transforms the data into Parquet format, and writes the transformed Parquet data into the shared volume. (A minimal sketch of what such a DAG can look like appears after section B.) Run the DAG:
1. Navigate to the Airflow screen using either of the following methods:
   - Click Data Engineering > Airflow Pipelines.
   - Click Tools & Frameworks, select the Data Engineering tab, and click Open in the Airflow tile.
2. In Airflow, verify that you are on the DAGs screen.
3. Click the spark_read_csv_write_parquet_fts DAG. NOTE: The DAG is pulled from a pre-configured HPE GitHub repository. This DAG is constructed to submit a Spark application that pulls the financial.csv file from an S3 bucket, transforms it into Parquet format, and places the converted files in a shared directory. If you want to use your private GitHub repository, see Airflow DAGs Git Repository to learn how to configure your repository.
4. Click Code to view the DAG code.
5. Click Graph to view the graphical representation of the DAG.
6. Click Run (play button). Upon successful DAG completion, the data is accessible inside your notebook server in the following directory for further processing: //financial-processed
7. To view details for the DAG, click Details. Under DAG Details, you can see green, red, and/or yellow buttons with the number of times the DAG ran successfully or failed.
8. Click the Success button.
9. To find your job, sort by End Date to see the latest jobs that have run, and then scroll to the right and click the log icon under Log URL for that run. Note that jobs run with the configuration: Conf "username":"your_username"
When running Spark applications using Airflow, you can see logs such as the following:

    Reading from s3a://ezaf-demo/data/financial.csv; src format is csv
    22/11/04 11:53:26 WARN AmazonHttpClient: SSL Certificate checking for endpoints has been explicitly disabled.
    Read complete
    Writing to file:///mounts/data/financial-processed; dest format is parquet
    Write complete

IMPORTANT: The cluster clears the logs that result from the DAG runs. The duration after which the cluster clears the logs depends on the Airflow task, cluster configuration, and policy.
B – View the Spark Application
Once you have triggered the DAG, you can view the Spark application in the Spark Applications screen. To view the Spark application, go to Analytics > Spark Applications. Alternatively, you can go to Applications & Frameworks and then click the Analytics tab. On the Analytics tab, select the Spark tile and click Open.
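The DAG triggered in section A is maintained in HPE's repository, but a minimal sketch of the general shape of such a DAG, assuming the apache-airflow-providers-cncf-kubernetes provider and a hypothetical SparkApplication manifest, looks like this:

    from datetime import datetime

    from airflow import DAG
    from airflow.providers.cncf.kubernetes.operators.spark_kubernetes import (
        SparkKubernetesOperator,
    )

    with DAG(
        dag_id="spark_read_csv_write_parquet_sketch",  # hypothetical name
        start_date=datetime(2023, 1, 1),
        schedule_interval=None,  # triggered manually, as in this tutorial
        catchup=False,
    ) as dag:
        # Submits a SparkApplication custom resource to the cluster; the manifest
        # would describe the CSV-to-Parquet job and reference the object-store secret.
        submit_spark_app = SparkKubernetesOperator(
            task_id="submit_spark_app",
            namespace="spark",                      # hypothetical namespace
            application_file="spark-fts-app.yaml",  # hypothetical manifest path
        )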
C – Run the Jupyter Notebook
Run the Jupyter notebook file to analyze and visualize the financial time series data. To run the notebook:
1. Connect to the notebook server. See Creating and Managing Notebook Servers.
2. In the Notebooks screen, navigate to the /financial-processed/ folder to validate that the data processed by the Spark application is available.
3. Open the financial_time_series_example.ipynb file.
4. In the sixth cell of the financial_time_series_example.ipynb file, update the user folder name as follows: user_mounted_dir_name = "user"
5. In the Notebook Launcher, select the second cell of the notebook and click Run the selected cells and advance (play icon).
6. After the packages install, restart the notebook kernel. To restart the kernel, click the Restart the kernel button or select Kernel > Restart Kernel in the menu bar at the top of the screen.
7. After the kernel restarts, click into the second cell and select Run the selected cells and advance.
8. Review the results of each notebook cell to analyze and visualize the data.
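The analysis cells essentially read the Parquet output back into pandas. A minimal sketch, assuming the shared-volume mount path shown in the DAG logs and that pyarrow is installed:

    import pandas as pd

    # Hypothetical mount point -- the shared-volume path printed by the DAG logs.
    df = pd.read_parquet("/mounts/data/financial-processed")

    print(df.head())      # quick look at the transformed records
    print(df.describe())  # summary statistics for the time series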
End of Tutorial
You have completed this tutorial. This tutorial demonstrated that you can use Airflow, Spark, and Notebooks in Unified Analytics to extract, transform, and load data into a shared volume and then run analytics and visualize the transformed data.
",
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Tutorials/Tutorials/financial-time-series-workflow.html",
"title": "Financial Time Series Workflow"
},
{
"content": "
MLflow Bike Sharing Use Case
Provides an end-to-end workflow in HPE Ezmeral Unified Analytics Software for an MLflow prediction model to determine bike rentals per hour based on weather and time.
Scenario
A data scientist wants to use a Jupyter Notebook to train a model that predicts how many bikes will be rented every hour based on weather and time information. HPE Ezmeral Unified Analytics Software includes the following components and applications to support this scenario:
- Dataset: The bike sharing dataset, bike-sharing.csv, available in the /shared/mlflow directory.
- Notebook (Jupyter): Two preconfigured Jupyter notebooks: bike-sharing-mlflow.ipynb, which runs code, trains models, and finds the best model; and bike-sharing-prediction.ipynb, which predicts based on the model, deployed via KServe.
- MLflow: Tracks the experiment and training runs, logs artifacts, metrics, and parameters for each run, and registers the best model.
- Object Storage: Stores artifacts that result after running each experiment.
- KServe Deployment: Downloads and deploys a model from object storage and makes the model accessible through a web service endpoint.
Steps
Sign in to HPE Ezmeral Unified Analytics Software and perform the following steps:
- Run the Bike Sharing Use Case
- Track Experiment, Runs, and Register a Model in MLflow
- Use the Model for Prediction
Run the Bike Sharing Use Case
1. In the left navigation pane, click Notebooks.
2. Connect to your notebook server instance. For this example, select hpedemo-user01-notebook.
3. Copy the MLflow folder from the /shared directory into the /user directory. NOTE: If the MLflow folder is not available in the /shared directory:
   - Go to the GitHub repository for tutorials.
   - Clone the repository.
   - Navigate to ezua-tutorials/Data-Science.
   - Navigate back to the /shared directory.
   - Copy the MLflow folder from the ezua-tutorials/Data-Science repository into the /shared directory.
   - Copy the /MLflow folder from the /shared folder to the /user directory.
4. Open bike-sharing-mlflow.ipynb and import mlflow and install libraries. When done, restart the kernel and run the cell. NOTE: If you are using the local s3-proxy, do not set the following environment variables for MLflow.
However, if you are trying to connect from outside the cluster, you must set the following environment variables:

    # Credentials and endpoints for reaching the MLflow artifact store from outside the cluster.
    import os
    os.environ["AWS_ACCESS_KEY_ID"] = os.environ["MLFLOW_TRACKING_TOKEN"]
    os.environ["AWS_SECRET_ACCESS_KEY"] = "s3"
    os.environ["AWS_ENDPOINT_URL"] = "http://local-s3-service.ezdata-system.svc.cluster.local:30000"
    os.environ["MLFLOW_S3_ENDPOINT_URL"] = os.environ["AWS_ENDPOINT_URL"]
    os.environ["MLFLOW_S3_IGNORE_TLS"] = "true"
    os.environ["MLFLOW_TRACKING_INSECURE_TLS"] = "true"

5. Run the notebook cells. Running the notebook returns the details of the best model.
Track Experiment, Runs, and Register a Model in MLflow
1. Navigate to the MLflow UI. You should see the bike-sharing-exp experiment.
2. Select the best model and then select Register Model. In this example, the best model is run 2.
3. In the Register Model window, enter Bike_Sharing_Model and click Register.
4. Click the Models menu to view the registered models.
Use the Model for Prediction
1. Navigate to the notebook server and open bike-sharing-prediction.ipynb.
2. Run the first cell and wait until the bike-sharing-predictor pod goes into the running state.
3. Run the second cell to deploy the machine learning model using the KServe inference service. NOTE: Update DOMAIN_NAME to your domain for external access and save changes.
The system prints the following predictions for the input:

    Rented Bikes Per Hour:
    Input Data: {'season': 1, 'year': 2, 'month': 1, 'hour_of_day': 0, 'is_holiday': 0, 'weekday': 6, 'is_workingday': 0, 'weather_situation': 1, 'temperature': 0.24, 'feels_like_temperature': 0.2879, 'humidity': 0.81, 'windspeed': 0.0}
    Bike Per Hour: 108.90178471846806
    Input Data: {'season': 1, 'year': 5, 'month': 1, 'hour_of_day': 0, 'is_holiday': 0, 'weekday': 6, 'is_workingday': 1, 'weather_situation': 1, 'temperature': 0.24, 'feels_like_temperature': 0.2879, 'humidity': 0.81, 'windspeed': 0.0}
    Bike Per Hour: 84.96339548602367
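Under the hood, the prediction cell posts JSON to the KServe endpoint using the standard v1 inference protocol. A minimal sketch with requests; the service URL below is hypothetical and depends on your namespace and DOMAIN_NAME:

    import requests

    # Hypothetical external URL for the deployed InferenceService.
    url = ("https://bike-sharing-predictor.<your-namespace>.<DOMAIN_NAME>"
           "/v1/models/bike-sharing-predictor:predict")

    payload = {"instances": [{
        "season": 1, "year": 2, "month": 1, "hour_of_day": 0, "is_holiday": 0,
        "weekday": 6, "is_workingday": 0, "weather_situation": 1,
        "temperature": 0.24, "feels_like_temperature": 0.2879,
        "humidity": 0.81, "windspeed": 0.0,
    }]}

    resp = requests.post(url, json=payload, verify=False)  # TLS verification per your setup
    print(resp.json())  # e.g. {"predictions": [108.9...]}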
End of Tutorial
You have completed this tutorial. This tutorial demonstrated how to train a model using notebooks, track experiments and runs, log artifacts with MLflow, and use KServe to deploy and predict models.
",
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Tutorials/Tutorials/tutorial-mlflow-bike-share.html",
"title": "MLflow Bike Sharing Use Case"
},
{
"content": "
MNIST Digits Recognition Workflow
Provides an end-to-end workflow in HPE Ezmeral Unified Analytics Software for an MNIST digits recognition example.
Scenario
NOTE: An administrator must prepare the environment for this tutorial to work. See Preparing the Tutorial Environment.
A data scientist wants to use a Jupyter Notebook to train a model that recognizes numbers in images. The image files reside in object storage and need to be transformed into Parquet format and put into a shared directory in the shared volume that the data scientist’s notebook is mounted to. HPE Ezmeral Unified Analytics Software includes the following components and applications to support an end-to-end workflow for this scenario:
- Spark: A Spark application that pulls images from the HPE Ezmeral Data Fabric Object Store via the MinIO endpoint, transforms the images into Parquet format, and puts the Parquet data into the shared directory in the shared volume.
- Airflow: A coded Airflow DAG that runs the Spark application.
- Notebook (Jupyter): A preconfigured Jupyter notebook mounted to the shared volume to run code and train models for the following Kubeflow pipelines:
   - Run experiments with Katib to pick the best model and then deploy the model using KServe.
   - Full training with TensorFlow jobs.
[Figure: the components and applications in the workflow]
Steps
Sign in to HPE Ezmeral Unified Analytics Software and perform the following steps:
- Prerequisites
- A – Run a DAG in Airflow
- B – View the Spark Application
- C – Update Path of Spark Generated Results
- D – Train the Model
- E – Serve the Model
Prerequisites
Connect to your Jupyter notebook and perform setup tasks to prepare the environment to train the model. A folder with a sample notebook file and SSL certificate is provided for the purpose of this tutorial. To connect your notebook and perform setup tasks:
1. In HPE Ezmeral Unified Analytics Software, go to Applications & Frameworks.
2. Select the Data Science tab and then click Open in the Kubeflow tile.
3. In Kubeflow, click Notebooks to open the notebooks page.
4. Click Connect to connect to your notebook server.
5. Go to your user folder.
6. Copy the template object_store_secret.yaml.tpl file from the shared/ezua-tutorials/Data-Analytics/Spark directory to your user folder.
7. In the /MNIST-Digits-Recognition folder, open the mnist_katib_tf_kserve_example.ipynb file. NOTE: If you do not see the MNIST-Digits-Recognition folder in your user folder, copy the folder from the /shared/ezua-tutorials/Data-Science/Kubeflow directory into your user folder. The /shared directory is accessible to all users; editing or running examples from the /shared directory is not advised. Your user directory is specific to you and cannot be accessed by other users. If the MNIST-Digits-Recognition folder is not available in the /shared/ezua-tutorials/Data-Science/Kubeflow directory:
   - Go to the GitHub repository for tutorials.
   - Clone the repository.
   - Navigate to ezua-tutorials/Data-Science/Kubeflow.
   - Navigate back to your user directory.
   - Copy the MNIST-Digits-Recognition folder from the ezua-tutorials/Data-Science/Kubeflow directory into your user directory.
8. To generate a secret that lets the Spark application (Airflow DAG) read data source files from the S3 bucket, run the first cell of the mnist_katib_tf_kserve_example.ipynb file:

    # Render the object-store secret template with the notebook's auth token.
    import kfp
    kfp_client = kfp.Client()
    namespace = kfp_client.get_user_namespace()
    !sed -e "s/\$AUTH_TOKEN/$AUTH_TOKEN/" /mnt/user/object_store_secret.yaml.tpl > object_store_secret.yaml

A – Run a DAG in Airflow
In Airflow, run the DAG named spark_read_write_parquet_mnist. The DAG runs a Spark application that pulls the images from object storage, transforms the data into Parquet format, and writes the transformed Parquet data into the shared volume.
1. Go to Airflow using either of the following methods:
   - Click Data Engineering > Airflow Pipelines.
   - Click Applications & Frameworks, select the Data Engineering tab, and click Open in the Airflow tile.
2. In Airflow, verify that you are on the DAGs tab.
3. Click the spark_read_write_parquet_mnist DAG. NOTE: The DAG is pulled from a pre-configured HPE GitHub repository. This DAG is constructed to submit a Spark application that pulls ubyte.gz files from an object storage bucket, converts the images into Parquet format, and places the converted files in a shared directory. If you want to use your private GitHub repository, see Configuring Airflow to find the steps to configure your repository.
4. Click Code to view the DAG code.
A - Run a DAG in Airflow

In Airflow, run the DAG named spark_read_write_parquet_mnist. The DAG runs a Spark application that pulls the images from object storage, transforms the data into Parquet format, and writes the transformed Parquet data into the shared volume.

1. Go to Airflow using either of the following methods:
   - Click Data Engineering > Airflow Pipelines.
   - Click Applications & Frameworks, select the Data Engineering tab, and click Open in the Airflow tile.
2. In Airflow, verify that you are on the DAGs tab.
3. Click the spark_read_write_parquet_mnist DAG.
   NOTE: The DAG is pulled from a pre-configured HPE GitHub repository. This DAG is constructed to submit a Spark application that pulls ubyte.gz files from an object storage bucket, converts the images into Parquet format, and places the converted files in a shared directory. If you want to use your private GitHub repository, see Configuring Airflow for the steps to configure your repository.
4. Click Code to view the DAG code.
5. Click Graph to view the graphical representation of the DAG.
6. Click Run (play button). Upon successful DAG completion, the data is accessible inside your notebook server, by default in the <user directory>/mnist-spark-data/ directory, for further processing.
7. To view details for the DAG, click Details. Under DAG Details, you can see green, red, and/or yellow buttons with the number of times the DAG ran successfully or failed. Click the Success or Failed button. To find your job, sort by End Date to see the latest jobs that have run, then scroll to the right and click the log icon under Log URL for that run. Note that jobs run with the configuration: Conf "username":"your_username".
   IMPORTANT: The cluster clears the logs that result from the DAG runs. The duration after which the cluster clears the logs depends on the Airflow task, cluster configuration, and policy.

B - View the Spark Application

After you run the DAG, you can view the status of the Spark application on the Spark Applications screen.

1. To view the Spark application, go to Analytics > Spark Applications. Alternatively, go to Applications & Frameworks, click the Analytics tab, select the Spark Operator tile, and click Open.
2. Identify the spark-mnist-<username>-<timestamp> application, for example spark-mnist-hpedemo-user01-20230728103759, and view the status of the application.
3. Optionally, in the Actions column, click View YAML.

C - Update Path of Spark Generated Results

1. Open the mnist_katib_tf_kserve_example.ipynb file.
2. In the third cell of the mnist_katib_tf_kserve_example.ipynb file, update the user folder name as follows: user_mounted_dir_name = "user"

D - Train the Model

To train the model:

1. In the Notebook Launcher, select the second cell of the notebook and click Run the selected cells and advance (play icon).
2. After the packages install, restart the notebook kernel. To restart the kernel, click the Restart the kernel button or select Kernel > Restart Kernel in the menu bar at the top of the screen.
3. After the kernel restarts, click into the third cell and select Run Selected Cell and All Below.
4. In the second-to-last cell, follow the Run Details link to open your Kubeflow pipeline.
5. Run the Kubeflow pipeline in the UI and wait for it to successfully complete.
6. To get details about components created by the pipeline run, go to the Experiments (AutoML) and Models pages in the Kubeflow UI.

E - Serve the Model

To serve the model with KServe and get the prediction, wait for the Kubeflow pipeline to successfully complete the run. The output displays the prediction results.
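For reference, once KServe exposes the inference service you can query it over HTTP with the KServe v1 protocol. The tutorial notebook builds the actual request for you; the host, model name, payload shape, and token handling below are illustrative assumptions only:

```python
# Sketch of a KServe v1 prediction request. The service host, model name,
# and payload are hypothetical; they depend on what the pipeline deployed.
import requests

url = "https://<kserve-host>/v1/models/mnist:predict"
payload = {"instances": [[0.0] * 784]}  # one flattened 28x28 image
resp = requests.post(url, json=payload, headers={"Authorization": "Bearer <token>"})
print(resp.json())
```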
End of Tutorial

You have completed this tutorial. This tutorial demonstrated that you can use Airflow, Spark, and notebooks in HPE Ezmeral Unified Analytics Software to extract, transform, and load data into a shared volume and then run analytics and train models using Kubeflow pipelines.

Retail Store Analysis Dashboard (Superset)

Provides an end-to-end workflow example for a retail store analysis scenario in HPE Ezmeral Unified Analytics Software using EzPresto and Superset.
Scenario

A data analyst wants to visualize data sets from MySQL, SQL Server, and Hive data sources in Superset. The data analyst signs in to HPE Ezmeral Unified Analytics Software and connects Unified Analytics to MySQL, SQL Server, and Hive data sources. The data analyst runs a federated query against the data sets and then creates a view from the query. The analyst accesses the view from Superset, uses it to visualize the data in a bar chart, and adds the chart to a dashboard.

HPE Ezmeral Unified Analytics Software includes the following components and applications to support an end-to-end workflow for this scenario:

EzPresto
    An MPP SQL query engine that runs accelerated queries against connected data sources and returns results to Superset for visualization. EzPresto connects to Superset through a database connection, enabling direct access from Superset to the data sources connected to Unified Analytics.
Superset
    An analytical dashboarding application that communicates with EzPresto to send queries and receive the query results needed to visualize data from the selected data sets.

[Diagram: the components and applications in the workflow]

Steps

Sign in to HPE Ezmeral Unified Analytics Software and perform the following steps:

- A - Connect Data Sources
- B - Select Data Sets and Create a View
- C - Connect to the Presto Database
- D - Add the View to Superset and Create a Chart
- E - Specify Query Conditions to Visualize Results in the Chart
- F - Create a Superset Dashboard and Add the Chart (Visualized Data)
- G - Monitor Queries

IMPORTANT: This tutorial demonstrates how to perform a series of tasks in HPE Ezmeral Unified Analytics Software to complete an example workflow. The data and information used in this tutorial is for example purposes only. You must connect Unified Analytics to your own data sources and use the data sets available to you in your data sources.

A - Connect Data Sources

Connect HPE Ezmeral Unified Analytics Software to external data sources that contain the data sets (tables and views) you want to work with. This tutorial uses MySQL, SQL Server, and Hive as the connected data source examples.

To connect a data source:

1. In the left navigation column, select Data Engineering > Data Sources. The Data Sources screen appears.
2. Click Add New Data Source.
3. Complete the steps required to connect to the MySQL, SQL Server, and Hive data sources:

Connecting to MySQL

1. In the Add New Data Source screen, click Create Connection in the MySQL tile.
2. In the drawer that opens, enter the following information in the respective fields:
   - Name: mysql
   - Connection URL: jdbc:mysql://<host>:<port>
   - Connection User: myaccount
   - Connection Password: moi123
   - Enable Local Snapshot Table: Select the check box
     TIP: When Enable Local Snapshot Table is selected, the system caches remote table data to accelerate queries on the tables. The cache is active for the duration of the configured TTL or until the remote tables in the data source are altered.
   - Enable Transparent Cache: Select the check box
     TIP: When Enable Transparent Cache is selected, the system caches data at runtime when queries access remote tables. As the query engine scans data in remote data sources, the scanned data is cached on the fly. Results for subsequent queries on the same data are quickly returned from the cache. The cache lives for the duration of the session.
3. Click Connect. Upon successful connection, the system returns the following message: Successfully added data source "mysql".

Connecting to SQL Server

1. In the Add New Data Source screen, click Create Connection in the SQL Server tile.
2. In the drawer that opens, enter the following information in the respective fields:
   - Name: mssql_ret2
   - Connection URL: jdbc:sqlserver://<host>:<port>;database=retailstore
   - Connection User: myaccount
   - Connection Password: moi123
   - Enable Local Snapshot Table: Select the check box (see the TIP above)
   - Enable Transparent Cache: Select the check box (see the TIP above)
3. Click Connect. Upon successful connection, the system returns the following message: Successfully added data source "mssql_ret2".

Connecting to Hive

1. In the Add New Data Source screen, click Create Connection in the Hive tile.
2. In the drawer that opens, enter the following information in the respective fields:
   - Name: hiveview
   - Hive Metastore: file
   - Hive Metastore Catalog Dir: file:///data/shared/tmpmetastore
3. In Optional Fields, search for the following fields and add the specified values:
   - Hive Max Partitions Per Writers: 10000
   - Hive Temporary Staging Directory Enabled: Unselect
   - Hive Allow Drop Table: Select
   - Enable Local Snapshot Table: Select the check box (see the TIP above)
   - Enable Transparent Cache: Select the check box (see the TIP above)
4. Click Connect. Upon successful connection, the system returns the following message: Successfully added data source "hiveview".

B - Select Data Sets and Create a View

In HPE Ezmeral Unified Analytics Software, complete the following steps to create a view. First select the data sources and data sets to work with. Then, run a federated query against the selected data sets and create a view from the query. This tutorial creates an example view named qf_retailstore_view.

Select the data sets:

1. In the left navigation bar, select Data Engineering > Data Catalog.
2. On the Data Catalog page, click the dropdown next to the mysql and mssql_ret2 data sources to expose the available schemas in those data sources.
3. Select schemas for each of the data sources:
   - For the mysql data source, select the retailstore schema.
   - For the mssql_ret2 data source, select the dbo schema.
4. In the All Datasets section, click the filter icon to open the Filters drawer.
5. Use the filter to identify and select the following data sets in the selected schemas:
   - For the dbo schema, filter for and select: call_center, catalog_sales, date_dim, item
   - For the retailstore schema, filter for and select: customer, customer_address, customer_demographics
6. After you select all the data sets, click Apply.
7. Click Selected Datasets (the button displaying the number of selected data sets).
8. In the drawer that opens, click Query Editor. Depending on the number of selected data sets, you may have to scroll down to the bottom of the drawer to see the Query Editor button.

Query the data sets and create a view:

1. In the Query Editor, click + to Add Worksheet.
2. Run the following command to create a new schema, such as hiveview.demoschema:

   create schema if not exists hiveview.demoschema;

3. Run a query to create a new view from a federated query against the selected data sets, for example:

   create view hiveview.demoschema.qf_retailstore_view as select * from mssql_ret2.dbo.catalog_sales cs
   inner join mssql_ret2.dbo.call_center cc on cs.cs_call_center_sk = cc.cc_call_center_sk
   inner join mssql_ret2.dbo.date_dim d on cs.cs_sold_date_sk = d.d_date_sk
   inner join mssql_ret2.dbo.item i on cs.cs_item_sk = i.i_item_sk
   inner join mysql.retailstore.customer c on cs.cs_bill_customer_sk = c.c_customer_sk
   inner join mysql.retailstore.customer_address ca on c.c_current_addr_sk = ca.ca_address_sk
   inner join mysql.retailstore.customer_demographics cd on c.c_current_cdemo_sk = cd.cd_demo_sk

4. Click Run. When the query completes, the status "Finished" displays.

C - Connect to the Presto Database

Complete the following steps to connect Superset to the Presto database for access to your data sources and data sets in HPE Ezmeral Unified Analytics Software. Once connected to the Presto database, you can access the view you created in the previous step (step B). To connect to the Presto database, you need the connection URI. You can get the URI from your HPE Ezmeral Unified Analytics Software administrator.

1. To open Superset, in the left navigation pane of HPE Ezmeral Unified Analytics Software, select BI Reporting > Dashboards. Superset opens in a new tab.
2. In Superset, select Settings > Database Connections.
3. Click +DATABASE.
4. In the Connect a database window, select the Presto tile.
5. Enter the SQLALCHEMY URI provided by your administrator.
6. Test the connection. If the test is successful, click Connect.
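If you want to sanity-check the view and the connection URI outside of Superset, a minimal sketch with SQLAlchemy and the PyHive Presto dialect follows. The URI shape, host, port, and credentials here are placeholder assumptions; use the SQLALCHEMY URI your administrator provides:

```python
# Hypothetical check of the qf_retailstore_view view from Python.
# Requires: pip install sqlalchemy "pyhive[presto]"
# Host, port, and credentials are placeholders, not values from the tutorial.
from sqlalchemy import create_engine, text

engine = create_engine(
    "presto://<user>:<password>@<ezpresto-host>:<port>/hiveview/demoschema"
)
with engine.connect() as conn:
    rows = conn.execute(text("select count(*) from qf_retailstore_view")).fetchall()
    print(rows)
```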
D - Add the View to Superset and Create a Chart

Complete the following steps to import the view you created in HPE Ezmeral Unified Analytics Software and create a bar chart. This tutorial demonstrates how to import the view qf_retailstore_view.

1. In the left navigation bar, select BI Reporting > Dashboards to open Superset.
2. In Superset, click the Datasets tab.
3. Click +DATASET.
4. In the Add Dataset window, select the following options:
   - DATABASE: Presto
   - SCHEMA: This tutorial uses the retailstore schema.
   - SEE TABLE SCHEMA: This tutorial uses qf_retailstore_view.
5. Click ADD DATASET AND CREATE CHART.
6. In the Create a New Chart window, select Bar Chart.
7. Click CREATE NEW CHART.
8. Enter a name for the chart, such as Retail Store View.

E - Specify Query Conditions to Visualize Results in the Chart

In Superset, charts visualize data based on the query conditions that you specify. The charts created in Superset automatically generate queries that Superset passes to the SQL query engine. Superset visualizes the query results in the chart. Try applying query conditions to visualize your data, and save your chart when done.

The following steps demonstrate how query conditions were applied to visualize data in the resulting example bar chart:

1. Enter the specified query parameters in the following fields:
   METRICS
   - Click into the METRICS field (located on the DATA tab). A metrics window opens.
   - Select the SIMPLE tab.
   - Click the edit icon and enter a name for the metric, such as SUM(cs_net_paid).
   - In the Column field, select cs_net_paid.
   - In the Aggregate field, select SUM.
   - Click Save.
   FILTERS
   - Click into the FILTERS field (located on the DATA tab).
   - In the window that opens, select the CUSTOM SQL tab.
   - Select the WHERE filter and enter the following: NULLIF(ca_state, '') IS NOT NULL
   - Click Save.
   DIMENSIONS
   - Drag and drop the ca_state column into the DIMENSIONS field.
   BREAKDOWNS
   - Click into the BREAKDOWNS column.
   - In the window that opens, select the SIMPLE tab and select the cc_name column.
   - Click Save.
   SORT BY
   - Click into the SORT BY field.
   - In the window that opens, select the SIMPLE tab and enter cs_net_paid as the COLUMN and SUM as the AGGREGATE.
   - Click Save.
2. Click CREATE CHART. The bar chart displays results when the query finishes processing.
3. Click Save to save the chart. In the Save Chart window that opens, do not enter or select a dashboard. Click Save to continue.

F - Create a Superset Dashboard and Add the Chart (Visualized Data)

Complete the following steps to create a new dashboard and add your chart to the dashboard. This tutorial adds the Retail Store View chart to a dashboard named Retail Store Analysis Dashboard.

To create a new dashboard and add your visualized data:

1. In Superset, click the Dashboards tab.
2. Click + DASHBOARD.
3. Enter a name for the dashboard, for example Retail Store Analysis Dashboard.
4. Drag and drop your chart into the dashboard.
5. Click Save to save the dashboard.

NOTE: Any time you open a chart or dashboard, Superset and the SQL query engine work together to visualize data. Loading a dashboard page triggers the queries against the database. As the queries run, buffering icons display until the data loads. When the data is loaded, the visualizations display.

G - Monitor Queries

You can monitor queries generated through Superset from the EzPresto endpoint. You can access the EzPresto endpoint in the EzPresto tile in the Applications & Frameworks space in HPE Ezmeral Unified Analytics Software.

Complete the following steps to monitor the query that the chart generates:

1. Return to the HPE Ezmeral Unified Analytics Software UI.
2. In the left navigation bar, select Applications & Frameworks.
3. On the Data Engineering tab, click the EzPresto endpoint in the EzPresto tile. The EzPresto UI opens in a new tab.
4. In the Query Details section, verify that Finished is selected. Selected options have a visible checkmark. You can see the query that ran to populate the Retail Store View bar chart in the Retail Store Analysis Dashboard.
5. Click the Query ID to see the query details.
6. To see a visualized query plan and metadata for the query, click Live Plan and hover over different areas of the visualized plan. You can also click on various parts of the visualized plan to zoom in on details.

End of Tutorial

You have completed this tutorial. This tutorial demonstrated the integration of the HPE Ezmeral Unified Analytics Software SQL query engine (EzPresto) with Superset to visualize the results of a query on data sets made available through the default Presto database connection. This tutorial also showed you how to monitor queries from the EzPresto Cluster Monitoring tool.

Submitting a Spark Wordcount Application

Provides an end-to-end example for creating and submitting a wordcount Spark application in HPE Ezmeral Unified Analytics Software.
Prerequisites

- Sign in to HPE Ezmeral Unified Analytics Software.
- For the wordcount Spark application, the application argument is file:///mounts/shared-volume/spark/wordcount.txt.
- For the wordcount Spark application, the application class name is org.apache.spark.examples.JavaWordCount.
- Ensure that you have the spark-examples_212-32016-eep-810.jar file available in the auto_spark_test_data directory and the wordcount.txt file available in the spark directory.
  NOTE: Always create the auto_spark_test_data and spark directories in the shared directory.
  1. In the left navigation column, navigate to Data Engineering > Data Sources.
  2. Click Browse.
  3. Select the shared directory.
  4. Select the auto_spark_test_data directory and verify that you have the spark-examples_212-32016-eep-810.jar file.
  5. Select the spark directory and verify that you have the wordcount.txt file.
  NOTE: If the spark-examples_212-32016-eep-810.jar file is not available in the auto_spark_test_data directory and the wordcount.txt file is not available in the spark directory, follow these steps:
  1. Go to the GitHub repository for tutorials.
  2. Navigate to ezua-tutorials/Data-Analytics/Spark in the repository and download the spark-examples_212-32016-eep-810.jar file and the wordcount.txt file.
  3. Upload the spark-examples_212-32016-eep-810.jar file to the auto_spark_test_data directory in HPE Ezmeral Unified Analytics Software.
  4. Upload the wordcount.txt file to the spark directory in HPE Ezmeral Unified Analytics Software.

Submitting the Wordcount Spark Application

The wordcount Spark application counts the number of occurrences of each unique word in the wordcount.txt input file.

1. In HPE Ezmeral Unified Analytics Software, use one of the following methods to go to Spark Applications:
   - In the left navigation bar, click the Analytics icon and click Spark Applications.
   - In the left navigation bar, click the Applications & Frameworks icon. On the Analytics tab, click Open in the Spark tile.
2. Click Create Application on the Spark Applications screen.
3. Navigate through each step within the Create Spark Application wizard:
   - Application Details: Create a new application. Set the following boxes:
     - Name: Enter the application name as <username>-wordcount.
       NOTE: The application name must be unique.
     - Description: Enter the application description. For example: This application counts words in a text file.
   - Configure Spark Application: Set the following boxes:
     - Type: Select the application type as Java.
     - Source: Select the main application file source as Shared Directory.
     - Filename: Click Browse, and select the spark-examples_212-32016-eep-810.jar file from the auto_spark_test_data directory.
     - Class Name: Enter org.apache.spark.examples.JavaWordCount as the main class of the application.
     - Arguments: Enter file:///mounts/shared-volume/spark/wordcount.txt as the input parameter required by the <username>-wordcount Spark application.
   - Click Dependencies. The wordcount application does not require any additional dependencies.
   - Click Driver Configuration. When boxes in this wizard are left blank, default values are set. The default values are as follows:
     - Number of Cores: 1
     - Core Limit: unlimited
     - Memory: 1g
   - Click Executor Configuration. When boxes in this wizard are left blank, default values are set. The default values are as follows:
     - Number of Executors: 1
     - Number of Cores per Executor: 1
     - Core Limit per Executor: unlimited
     - Memory per Executor: 1g
   - Click Schedule Application. If you want to schedule a Spark application, see Creating Spark Applications for details.
   - Click Review. To view the application configuration, click Edit YAML. To apply the changes, click Save Changes. To cancel the changes, click Discard Changes. You can also click the pencil icon in each section to navigate to the specific step to change the application configuration.
4. Click Create Spark Application on the bottom right of the Review step.

The wordcount Spark application is created and submitted. You can view it on the Spark Applications screen. You can also view the logs to check the output of the wordcount application. To see the logs, click the menu icon in the Actions column of the <username>-wordcount application, and click View Logs.

NOTE: After you are finished with your task, click the menu icon in the Actions column of the <username>-wordcount application and click Delete. Alternatively, you can select the checkbox for the application you want to delete and click Delete on the top right pane of the table.
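For context, the JavaWordCount class that the wizard references simply splits the input file into words and counts each one. A rough PySpark equivalent, assuming the same shared-volume path and a standard Spark session (this sketch is not the packaged example class), looks like:

```python
# Minimal PySpark wordcount sketch, equivalent in spirit to JavaWordCount.
from operator import add
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("wordcount-sketch").getOrCreate()

# Read the same input file the tutorial uses from the shared volume.
lines = spark.read.text("file:///mounts/shared-volume/spark/wordcount.txt")

# Split each line into words, pair each word with 1, and sum per word.
counts = (
    lines.rdd.flatMap(lambda row: row.value.split())
    .map(lambda word: (word, 1))
    .reduceByKey(add)
)

for word, count in counts.collect():
    print(word, count)

spark.stop()
```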
Resources

Provides links to additional resources such as product licensing information, on-demand training, videos, blogs, and the HPE Ezmeral Unified Analytics Software community.

In addition to the product documentation, you may be interested in the following resources:

Download Documentation
    Click here to download a PDF of the HPE Ezmeral Unified Analytics Software documentation.
Contact for Support
    Get in touch with the HPE Ezmeral Unified Analytics Software support team through the HPE Support Center.
HPE Ezmeral Software Resources
    - Slack Community for Developers: https://slack.hpedev.io/
    - Videos, Reports, and Case Studies: https://www.hpe.com/us/en/resource-library.html
    - HPE GreenLake Marketplace: https://www.hpe.com/us/en/software/marketplace.html/platform/ezmeraldata
Glossary
    To find the list of terms (with descriptions) used in the HPE Ezmeral Unified Analytics Software documentation, see Glossary.

Administration

Provides information about managing applications and clusters in HPE Ezmeral Unified Analytics Software.
The Administration section covers the following topics:

- Installation: Provides links to HPE Ezmeral Unified Analytics Software installation and service activation topics.
- Identity and Access Management: Describes identity and access management in HPE Ezmeral Unified Analytics Software.
- Expanding the Cluster: Describes how to add additional user-provided hosts to the management cluster to increase resource capacity and how to expand the cluster to include the additional user-provided hosts.
- Shutting Down an HPE Ezmeral Unified Analytics Software Cluster: Describes how to gracefully shut down a cluster when you want to perform maintenance or upgrade tasks.
- Importing Applications and Managing the Application Lifecycle: Describes how to import, manage, and secure applications and frameworks.
- Connecting to External S3 Object Stores: Describes how to connect to external S3 object storage in AWS, MinIO, and HPE Ezmeral Data Fabric Object Store.
- Connecting to External HPE Ezmeral Data Fabric Clusters: Describes how to connect to an external HPE Ezmeral Data Fabric cluster.
- Configuring Endpoints: Describes the endpoints in HPE Ezmeral Unified Analytics Software and how to configure them.
- GPU Support: Provides information about support for NVIDIA GPUs, MIG partitioning, preparing hosts for a GPU-enabled environment, and adding hosts and enabling GPUs.
- Troubleshooting: Describes how to identify and debug issues.
- Support Matrix: Lists the tools and frameworks, HPE Ezmeral Data Fabric versions, operating system versions, and GPU models supported for each release.
- Release Notes: Provides an overview of the latest updates and enhancements in version 1.3.0, including new features, improvements, bug fixes, and known issues.
Installation

Provides links to HPE Ezmeral Unified Analytics Software installation and service activation topics:

- Installing on User-Provided Hosts (Connected and Air-gapped Environments): Provides the steps for installing on user-provided hosts in connected and air-gapped environments. A user-provided host is a bare metal machine or virtual machine (VM) that meets the installation prerequisites.
- Installing HPE Ezmeral Unified Analytics Software on OpenShift: Provides the prerequisites and steps for installing in an OpenShift cluster and lists the current limitations.
- HPE Ezmeral Unified Analytics Software Service Activation and Billing Processes: Provides the post-installation steps required to activate the software in connected and air-gapped environments.
- Using the Air Gap Utility: Describes how to use the Air Gap Utility to download files in an air-gapped environment.
- Ports Used by HPE Ezmeral Unified Analytics Software: Lists and describes the ports used by HPE Ezmeral Unified Analytics Software.
Installing on User-Provided Hosts (Connected and Air-gapped Environments)

Provides the steps for installing HPE Ezmeral Unified Analytics Software on user-provided hosts in connected and air-gapped environments.
A user-provided host is a bare metal machine or virtual machine (VM) that meets the installation prerequisites. HPE Ezmeral Unified Analytics Software supports bare metal and VM installations on AWS, GCP, and Azure. You can install HPE Ezmeral Unified Analytics Software from a laptop or host machine.

Complete the following steps to install HPE Ezmeral Unified Analytics Software on a bare metal machine or virtual machine (VM):

1. Review the prerequisites and verify that the requirements have been met.
2. Run the installation script to access the Installer Web UI.
3. In the Installer Web UI, provide the pertinent information on each of the following screens:
   - Node Setup
   - Installation Details
   - User Authentication Details
4. Complete the post-installation steps.

Prerequisites

- Verify that you have configured a pool of user-provided hosts that meet the installation prerequisites. See Installation Prerequisites.
- You must have HPE Ezmeral Unified Analytics Software downloaded before you follow the instructions in this document. The download includes the software binaries, installation script, Airgap Utility (for air-gapped environments), and a README.txt file. After purchasing HPE Ezmeral Unified Analytics Software, the downloads are made available to you through the Access your products button in the HPE Subscription Electronic Receipt email that you receive from HPE.
- The HPE Ezmeral Unified Analytics Software deployment runs on a Kubernetes cluster. Components within HPE Ezmeral Unified Analytics Software cannot launch until they download their respective container images. How the components download the container images depends on your environment:
  - Direct connection: If the machine is directly connected to the internet (UI accessible), you do not have to provide any proxy settings during installation. However, firewall settings can prevent the packages from being downloaded.
  - Proxy connection: If the machine is connected to the internet via a proxy, you must provide the proxy server information for http, https, and no_proxy during installation.
  - Air-gapped environment: The Airgap Utility prerequisite describes the requirements for an air-gapped environment. See Installation Prerequisites.

Run the Installation Script to Access the Installer Web UI

To run the installation script and open the Installer Web UI, complete the following steps:

1. Go to the directory where you extracted the installer bundle (HPE_Ezmeral_Unified_Analytics_Installer_S1U85-70016.star):

   cd S1U85-70016

2. Run the installation script on a host, but do not run it on the hosts used to deploy HPE Ezmeral Unified Analytics Software. See Installation Prerequisites for details.

   ./start_ezua_installer_ui.sh

   The launcher guides you through the prompts to start the Installer Web UI.
   NOTE: If you get a permission denied error, run chmod +x start_ezua*.sh before you run the installation script.
   If the image is locally available, the container starts right away. If the image is not local, it takes time to download the image; the time for the image to download and the container UI to start depends on network speed.
3. If you ran the script on a laptop, you can access the installer UI by connecting to the browser using localhost:8080. If you ran the script on a different node, you can access the installer UI by connecting to the browser using <node-ip>:8080.
4. Verify that port 8080 is opened through firewalls from the laptop to the node running the installer; a quick check is sketched below.
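A minimal reachability check you can run from the machine where the browser runs, where <installer-node> is a placeholder for the node running the installer:

```python
# Sketch: confirm the installer UI port is reachable through firewalls.
# "<installer-node>" is a placeholder, not a value from the documentation.
import socket

try:
    with socket.create_connection(("<installer-node>", 8080), timeout=5):
        print("Port 8080 is reachable")
except OSError as err:
    print(f"Port 8080 is not reachable: {err}")
```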
5. If proxy settings are present in the environment, include the master node DNS names of the workload and coordinator clusters in the NO_PROXY list.
6. On the screen that appears, select one of the following options:
   TIP: The Ezmeral Coordinator is the component that orchestrates the deployment of HPE Ezmeral Unified Analytics Software instances.
   - Installation Using New Ezmeral Coordinator: For a first-time installation, select this option. When you install with a new Ezmeral Coordinator, you designate the control plane (Ezmeral Coordinator and management cluster nodes) and worker nodes, as described in the following section, Node Setup.
   - Installation Using Existing Ezmeral Coordinator: If you previously installed HPE Ezmeral Unified Analytics Software, select this option to use the existing Ezmeral Coordinator to create a new HPE Ezmeral Unified Analytics Software cluster. All files in the existing cluster are cleared, except for the kubeconfig file for the Ezmeral Coordinator. You do not have to reconfigure the management cluster or upload the configuration file again.
7. On the Select your deploy target screen, select Install in the Bare Metal or VM tile. The Node Setup screen appears.

Node Setup

Node setup sets up the control plane and worker nodes. You can upload a YAML file or manually configure the nodes through fields in the Installer Web UI. You have the option of running an installation pre-check script, as described in step 2 of this section.

The following describes the control plane and worker nodes:

Control Plane
    Enter a comma-separated list of nodes (IP addresses). If you chose to install using a new Ezmeral Coordinator, the first node listed becomes the Ezmeral Coordinator node. This node orchestrates the deployment of HPE Ezmeral Unified Analytics Software instances. The remainder of the nodes in the list serve as the management cluster.
    - Installation Using New Ezmeral Coordinator (first-time installation): minimum 2 nodes *; minimum 4 nodes ** for high availability
    - Installation Using Existing Ezmeral Coordinator: minimum 1 node; minimum 3 nodes for high availability
Worker
    Enter a comma-separated list of nodes (IP addresses). These nodes run the HPE Ezmeral Unified Analytics Software service. Calculate the number of worker nodes based on the VCPUs you enter in step 7; the total must be a minimum of 96 VCPUs, and the accumulated total VCPUs of the worker nodes should match or exceed the number of VCPUs that you enter in step 7. Minimum required: 3 (high availability: N/A).

* Requires one node for the Ezmeral Coordinator and one node for the workload.
** Requires one node for the Ezmeral Coordinator and three nodes for the workload.

IMPORTANT:
- Either the SSH password or the SSH key is required. The SSH passphrase is optional and only applicable if the SSH key is provided.
- Wall clock time on the hosts in the deployment must be synchronized.

On the Node Setup screen, complete the following steps:

1. Upload a YAML file or complete the fields to manually configure the nodes. If you upload a YAML file, the system runs a validation check against the file and returns an error message if the file is invalid.
   TIP: A YAML template file is provided and includes the following fields (a sketch for producing the base64-encoded ssh_key value follows below):

   controlplanes: ""  # comma-separated list of ip values
   workers: ""        # comma-separated list of ip values
   ssh_username: root
   ssh_password: ""
   ssh_key: ""        # base64 encoded string
   ssh_passphrase: "" # optional
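If you configure the nodes via the YAML template, the ssh_key field expects the private key as a base64-encoded string. A minimal sketch, assuming your key lives at ~/.ssh/id_rsa (the path is an example, not a documented requirement):

```python
# Sketch: produce the base64-encoded string for the ssh_key field.
# The key path is an example; use the key that can reach your hosts.
import base64
from pathlib import Path

key_bytes = Path.home().joinpath(".ssh", "id_rsa").read_bytes()
print(base64.b64encode(key_bytes).decode("ascii"))
```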
2. (Optional) Run the installation pre-check script. The installation pre-check script runs checks against each of the host machines configured for HPE Ezmeral Unified Analytics Software, including the Ezmeral Coordinator, control plane, and worker hosts. The script also does an aggregated check to verify that the hosts, operating as a cluster, have enough resources to support the installation.
   Running the pre-check script can take some time; however, you can continue the installation because the pre-check runs in the background. When the pre-check completes, the system posts a message stating that the pre-check succeeded or failed. In case of failure, return to the Node Setup screen; the system posts the node(s) that did not pass the pre-check. To access the logs in the UI container, complete the following steps:
   1. Exec into the Docker container:

      docker exec -it node-server bash

   2. Go to the prechecks directory:

      cd /tmp/prechecks

   3. View the pre-check logs:

      cat prechecksLogs.txt

   4. Check the logs for errors and resolve the errors. Once resolved, run the installation pre-check again to verify that the issue is resolved, and then complete the installation.
3. Click Next to proceed to Installation Details.

Installation Details

On the Installation Details screen, complete the following steps:

1. Complete the following fields:

   Installation Name
       Enter a unique name for the installation. The installation name must consist of lowercase alphanumeric characters or -. For example, installation-1. This name becomes the name of the cluster namespace. In the future, if you need to add additional hosts to increase resources for applications, you will use this name as the namespace when adding hosts, as described in Expanding the Cluster.
   Domain Name
       Enter a valid DNS domain name to connect to the cluster via the browser.
       NOTE: The HPE Ezmeral Unified Analytics Software cluster domain name cannot be the same as the DNS host domain name. Do not enter your corporate top-level domain (TLD) name in this field; if you enter the corporate TLD name, you must set up a wildcard record that points all subdomains of the corporate domain to the HPE Ezmeral Unified Analytics Software ingress gateway hosts. Best practice is to enter a subdomain off the corporate domain. For example, if your corporate domain is company.com, you could enter ezua.company.com as your domain name. As you continue the installation process, you will set up wildcard records for the domain name you enter in this field. The DNS name resolution to those records should work for pods and any member of your organization that needs access to HPE Ezmeral Unified Analytics Software.
   VCPU
       The number of VCPUs that you enter determines the number of worker nodes. Typically, 96 VCPUs translates to three worker nodes, and entering 97 would translate to four worker nodes. If you need to distinguish between cores and VCPUs, for example in cases where hyperthreading is enabled, run the lscpu tool to accurately determine the VCPUs for your hosts.
   High Availability
       When selected, three controller nodes are enabled. Currently, HA is available for the workload cluster only. The management cluster does not support HA.
   Use GPU
       See GPU Support.
   Air Gap Environment
       Select this option when installing in an air-gapped environment (no internet access). If you select Air Gap Environment, you must provide the registry details.
   Registry URL
       Enter the registry URL.
Installation Details

On the Installation Details screen, complete the following fields:

- Installation Name: Enter a unique name for the installation. The installation name must consist of lowercase alphanumeric characters or "-", for example installation-1. This name becomes the name of the cluster namespace. In the future, if you need to add additional hosts to increase resources for applications, you will use this name as the namespace when adding hosts, as described in Expanding the Cluster.
- Domain Name: Enter a valid DNS domain name to connect to the cluster via the browser. NOTE: The HPE Ezmeral Unified Analytics Software cluster domain name cannot be the same as the DNS host domain name. Do not enter your corporate top-level domain (TLD) name in this field; if you do, you must set up a wildcard record that points all subdomains of the corporate domain to the HPE Ezmeral Unified Analytics Software ingress gateway hosts. Best practice is to enter a subdomain of the corporate domain. For example, if your corporate domain is company.com, you could enter ezua.company.com as your domain name. As you continue the installation process, you will set up wildcard records for the domain name you enter in this field. DNS name resolution to those records should work for pods and for any member of your organization that needs access to HPE Ezmeral Unified Analytics Software.
- VCPU: The number of VCPUs that you enter corresponds to the number of worker nodes. Typically, 96 VCPUs translates to three worker nodes, while entering 97 would translate to four worker nodes. If you need to distinguish between cores and VCPUs, for example where hyperthreading is enabled, run the lscpu tool to accurately determine the VCPUs for your hosts.
- High Availability: When selected, three controller nodes are enabled. Currently, HA is available for the workload cluster only; the management cluster does not support HA.
- Use GPU: See GPU Support.
- Air Gap Environment: Select this option when installing in an air-gapped environment (no internet access). If you select Air Gap Environment, you must provide the registry details.
- Registry URL: Enter the registry URL. Only required for air-gapped environments, but it can also be used for a custom image registry in connected environments. Make sure you add the trailing / at the end of the URL, as shown in the following example: my-registry.mip.storage.mycompany.net/ezua/
- Username: Enter the user name for the administrative user.
- Password: Enter the password for the administrative user.
- Registry Insecure: Select this option if the registry is not secure. If the registry is secure, do not select this option.
- CA Certificate: Upload the CA certificate. See Working with Certs and the Truststore.
- TLS Certificates:
  - Use Self Signed Certificate - Typically only selected for POCs and demos. For production environments, HPE recommends uploading your own certificates (CA certificate and private key).
  - CA Certificate - Upload the CA certificate.
  - Private Key - Upload the private key.
  - Certificate - Upload additional certificates. See AD/LDAP Servers and Working with Certs and the Truststore.
- Proxy Details (NOTE: The proxy details apply to the HPE Ezmeral Unified Analytics Software application; they do not apply to the host):
  - HTTP Proxy - Enter the URL for the proxy data center.
  - HTTPS Proxy - Enter the URL for the proxy data center.
  - No Proxy - Each of the hosts in the HPE Ezmeral Unified Analytics Software cluster must have the IP addresses of the coordinator and control plane hosts of the workload cluster in the no_proxy list. Add the FQDN of the master host in the workload cluster OR a comma-separated list of IP addresses or hostnames. Note that some of the IP addresses in the cluster are required to bypass the proxy settings to reach the internal pod/container entities. Use the following string of IP addresses to bypass the proxy settings (a shell-variable version of these values is sketched after this list):

      10.96.0.0/12,10.224.0.0/16,10.43.0.0/16,.external.hpe.local,localhost,.cluster.local,.svc,.default.svc,127.0.0.1

    For example, if your domain is ezua.company.com, you would enter the following string for no_proxy:

      10.96.0.0/12,10.224.0.0/16,10.43.0.0/16,.external.hpe.local,localhost,.cluster.local,.svc,.default.svc,127.0.0.1,ezua.company.com

  - External URL - This field only applies to the workload nodes and is only required if you select HA for the HPE Ezmeral Unified Analytics Software application. If you want HA for the Ezmeral Coordinator, contact HPE Support before you install on the Ezmeral Coordinator node.
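If you also need these proxy settings in a shell, for example on the launcher host, the same values map onto the standard proxy environment variables; a sketch, where the proxy URL and domain are placeholders:

   export http_proxy=http://proxy.company.com:8080    # placeholder proxy URL
   export https_proxy=http://proxy.company.com:8080   # placeholder proxy URL
   export no_proxy=10.96.0.0/12,10.224.0.0/16,10.43.0.0/16,.external.hpe.local,localhost,.cluster.local,.svc,.default.svc,127.0.0.1,ezua.company.com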
Click Next to proceed to User Authentication Details.

User Authentication Details

Connected and air-gapped installations can use internal or external LDAP. Internal LDAP is typically used for POC and demo scenarios; external LDAP is typically used for production environments. See AD/LDAP Servers.

To add user authentication details, complete the following steps:

1. Either select or clear the option to use an external LDAP server.
2. If you select Use External LDAP Server, complete the related fields. The user that you enter becomes the default Unified Analytics administrative user and must already exist in the AD/LDAP server that you specify. The related fields are:
   - Select Active Directory if the LDAP is an Active Directory (ADLDAP)
   - Security Protocol
   - LDAP Server Address
   - Server Port
   - Bind DN
   - Bind Password
   - Search Base DN
   - Trust Store File
   - Trust Store Password
   - Username Attribute
   - Fullname Attribute
   - Email Attribute
   - UID Attribute
   - GID Attribute
   - Group Name
   - Group GID
   - Username of the default admin user
   - Validation options
3. If you do not select the Use External LDAP Server option, provide the following information to create the default Unified Analytics administrative user. This user must be part of your organization and have an organization email, for example bob@company.com:
   - Username
   - Full Name
   - Email
   - Password
4. Click Submit. The installation of components and applications begins.

The Installation Status screen displays the installation status of the components and applications as the installation progresses.

IMPORTANT: Note the IP addresses on this screen. You need them to complete the post-installation steps to update your DNS A records and related DNS records.

If the installation fails at any point, click Download Logs to access the log files for the Ezmeral Coordinator, infrastructure services, or application services. Review the log files to troubleshoot the failure. If you cannot resolve the installation failure, contact HPE Support.

TIP: The first status update shows the progress of the Ezmeral Coordinator. When it completes, the Download Kubeconfig button appears on the screen, and you can download the kubeconfig for the Ezmeral Coordinator and worker cluster. Clicking Open HPE Ezmeral Unified Analytics Software launches the UI. Clicking Start New Installation installs another instance of HPE Ezmeral Unified Analytics Software.

Post Installation Steps

Complete the post-installation steps, as described in Post Installation Steps.

"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Installation/install-physhost-vm.html",
"title": "Installing on User-Provided Hosts (Connected and Air-gapped Environments)"

Installation Prerequisites
Lists the prerequisites for HPE Ezmeral Unified Analytics Software installation on user-provided hosts in connected (internet access) and air-gapped (no internet access) environments.

You can install HPE Ezmeral Unified Analytics Software on user-provided hosts. A user-provided host is a bare metal machine or virtual machine (VM) that meets the prerequisites listed on this page. HPE Ezmeral Unified Analytics Software supports bare metal and VM installations on AWS, GCP, and Azure. If you need to add user-provided hosts to increase the amount of resources for applications and users after you install HPE Ezmeral Unified Analytics Software, you can expand the cluster, as described in Expanding the Cluster.

IMPORTANT: The HPE Ezmeral Unified Analytics Software product download includes the software binaries, installation script, Air Gap Utility (for air-gapped environments), and a README.txt file. After purchasing HPE Ezmeral Unified Analytics Software, the downloads are made available to you through the Access your products button in the HPE Subscription Electronic Receipt email that you receive from HPE. When creating a domain name, opt for a subdomain name that is used only for HPE Ezmeral Unified Analytics Software. For example, if your top-level domain (TLD) name is company.com, use a subdomain name such as ezua.company.com.

Software Binaries

The README.txt file included with the product provides instructions for downloading and extracting the HPE Ezmeral Unified Analytics Software binaries that are required to install the product, including the Air Gap Utility.

Air Gap Utility (Only required for air-gapped environments)

Use the Air Gap Utility to get the required container images. Create a local repository if you do not already have one. In addition to setting up a local repository, you must also set up a RHEL/Rocky 8-based yum repository; the installer runs yum commands against this repository. See Using the Air Gap Utility for additional information. For operating system support, see the Operating System support matrix.

Host Machines

HPE Ezmeral Unified Analytics Software installation requires two types of host machines, plus a launcher host, with the following minimum requirements:

Table 1. Host Machine Requirements
- Control plane (Ezmeral Coordinator/management cluster): 4 VCPU, 32 GB memory, 500 GB disk (1 disk), 2 machines
- Workload: 32 VCPU, 128 GB memory, 500 GB disk (2 disks), 3 machines
- Launcher host: 1 VCPU, 2 GB memory, 4 GB disk, 1 machine. This is the host that runs the installation script. It is separate from the hosts that deploy HPE Ezmeral Unified Analytics Software, must have Docker version 20.10 (with a minimum of 20 GB storage), and must have port 8080 allowed through the firewall.

IMPORTANT: To meet VCPU sizing requirements, at least three storage-capable hosts are required. Host machines must have a sudo password.
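To confirm that a prospective host meets the Table 1 minimums, you can inspect it with standard tools; a sketch, with the thresholds shown for a workload host:

   echo "VCPUs:  $(nproc)"                               # workload hosts need >= 32
   free -g | awk '/^Mem:/ {print "Memory: "$2" GB"}'     # workload hosts need >= 128 GB
   lsblk -d -o NAME,SIZE,TYPE | grep disk                # workload hosts need 2 disks of >= 500 GB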
Mount Points

The supported mount points and their minimum sizes depend on the type of host. The host file system must have at least the root mount point (/). The total minimum required size is the combination of all the mount point sizes listed in the table for a given type of host. If you choose not to configure a listed mount point, that mount point's required size must be added to the root (/) mount point. For example, if you choose not to configure /opt as a separate mount point on the Controller host, you must add the 100 GB listed for /opt to the 50 GB listed for the root mount point; that is, if /opt is not a separate mount point, the Controller host requires 150 GB for the root (/) mount point. The storage size for the Controller and Shadow Controller hosts must match.

Table 2. Mount Point Requirements for Controller and Shadow Controller Hosts
- / : 50 GB minimum. Root file system where the Unified Analytics components are stored.
- /var, or /var/lib, or /var/lib/docker : 150 GB minimum. Stores container metadata information.
- /opt : 100 GB minimum. Stores all Unified Analytics software.
- /srv or /srv/bluedata : 20 GB minimum. /srv/bluedata stores all temporary runtime files, including any artifacts, such as scripts and JAR files, that have been uploaded for running jobs.

Table 3. Mount Point Requirements for Kubernetes Hosts
- / : 70 GB minimum. Root file system where the Unified Analytics components are stored.
- /var, or /var/lib, or /var/lib/containerd, or /var/lib/docker : 150 GB minimum. Stores container metadata information. /var/lib/containerd is used for hosts running the Hewlett Packard Enterprise distribution of Kubernetes; /var/lib/docker is used for the other hosts in the deployment.
- /opt : 50 GB minimum. Stores all Unified Analytics software. /opt/ezkube (on Kubernetes hosts only), /opt/bluedata, and /opt/hpe are used to install Unified Analytics.

VCPU Sizing Guidelines

The combined VCPUs of the worker hosts should be no less than 96 for deploying HPE Ezmeral Unified Analytics Software services and apps; otherwise, some services cannot start due to lack of resources. If the machine configuration is 16 VCPU and 64 GB of memory, HPE recommends using a minimum of six (6) machines. When installing HPE Ezmeral Unified Analytics Software, the VCPU option (on the Installation Details screen) should be equal to or less than the total VCPUs of the combined capability of the worker hosts; otherwise, the installation will fail due to lack of resources.

DNS Configuration

The DNS configuration requires that:
- All hosts have A records added to DNS.
- Name resolution works forward and backward.
- Each FQDN is a maximum of 63 characters.
- All hosts are part of the DNS domain and can resolve the FQDNs.
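To verify the forward and reverse name-resolution requirement on each host, you can use the standard dig client; a sketch, where the FQDN is a placeholder:

   host=worker1.ezua.company.com                   # placeholder FQDN
   ip=$(dig +short "$host" A | head -n1)
   echo "forward: $host -> $ip"
   echo "reverse: $ip -> $(dig +short -x "$ip")"   # should return the FQDN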
Operating System

HPE Ezmeral Unified Analytics Software supports RHEL 8.8. All machines serving as hosts must run the same OS. Both GPU and non-GPU hosts are supported with RHEL 8.8. Hewlett Packard Enterprise strongly recommends using only dedicated hosts with clean OS installations; installing HPE Ezmeral Unified Analytics Software on hosts with other running applications can cause unpredictable behavior. To ensure your OS has the latest packages, Hewlett Packard Enterprise recommends performing a yum update before installation. Use the standard OS kernel; modifications may cause HPE Ezmeral Unified Analytics Software to function unpredictably. To minimize the need for troubleshooting, Hewlett Packard Enterprise recommends newer kernel versions. HPE Ezmeral Unified Analytics Software does not support upgrades between major OS versions. For example, if you are migrating from OS version 7.x to 8.x, you must perform a new installation (not an upgrade), and then install HPE Ezmeral Unified Analytics Software.

RHEL 8.8 Requirements

HPE Ezmeral Unified Analytics Software has the following RHEL requirements:
- RHEL systems must have active, valid subscriptions in order to access the RHEL RPM repositories.
- Firewall is supported only in iptables mode for RHEL 8.8.

TIP: The GPU operator does not support Ubuntu or Rocky Linux; Rocky Linux works in non-GPU environments. For information related to operating systems and operating system version support, contact HPE Support.

Network

HPE Ezmeral Unified Analytics Software installation has the following network requirements:

Table 4. Network Requirements
- VM: Single vNIC, static IP
- DNS: DNS server to resolve the FQDN
- Network: Single network connecting all machines
- IP Address: Static; 2 controller machines + 3 worker machines

Disable IP Checksum

Before you install HPE Ezmeral Unified Analytics Software, run the following script on all nodes to disable IP checksum offload. The tail of the script (the heredoc bodies and final commands) is reconstructed here as a sketch and should be checked against the script shipped with the product:

   #!/bin/bash

   # Script to disable IP checksum offload using ethtool for the primary NIC.
   # A oneshot systemd service persists this across reboots.

   # Setting the IP address of the node
   HOST_IP="$(hostname -i)"

   echo "fetching interface name for host: $HOST_IP"
   PRIMARY_NIC=$(ip -o a show | grep ${HOST_IP} | awk '{print $2}')

   echo "printing current configuration for the nic"
   ethtool -k "${PRIMARY_NIC}" | grep tx-checksum-ip-generic

   echo "creating env and systemd unit file to turn chksum off for interface \"$PRIMARY_NIC\""

   cat > /etc/sysconfig/ezfab-chksum-off <<EOF
   PRIMARY_NIC=${PRIMARY_NIC}
   EOF

   # Reconstructed unit file: runs ethtool once at boot to turn the offload off.
   cat > /usr/lib/systemd/system/ezfab-chksum-off.service <<'EOF'
   [Unit]
   Description=Disable IP checksum offload on the primary NIC
   After=network-online.target

   [Service]
   Type=oneshot
   EnvironmentFile=/etc/sysconfig/ezfab-chksum-off
   ExecStart=/usr/sbin/ethtool -K ${PRIMARY_NIC} tx-checksum-ip-generic off

   [Install]
   WantedBy=multi-user.target
   EOF

   systemctl daemon-reload
   systemctl enable --now ezfab-chksum-off.service

Installing HPE Ezmeral Unified Analytics Software on OpenShift

C. (Air-Gapped Only) Inject HPE Ezmeral Unified Analytics Software images into your local repository

Create a self-signed certificate for the local registry. The -keyout argument in the template below is restored from the example that follows:

   openssl req -newkey rsa:4096 -nodes -sha256 -keyout <$KEY_FILE_LOCATION> -x509 -days 365 -subj "/CN=<$CERTIFICATE_NAME>" -addext "subjectAltName = DNS:<$FULL_DNS>" -out <$CRT_FILE_LOCATION>

   # Example:
   openssl req -newkey rsa:4096 -nodes -sha256 -keyout /local_registry/certs/domain.key -x509 -days 365 -subj "/CN=Myname" -addext "subjectAltName = DNS:*.example.com" -out /local_registry/certs/domain.crt

You must copy this certificate file to the standard location of the operating system. For RHEL, the standard cert location is /etc/pki/ca-trust/source/anchors:

   cp /local_registry/certs/domain.crt /etc/pki/ca-trust/source/anchors/

After you copy the file, run:

   update-ca-trust

Create access credentials to the registry to keep it secure. You can skip this step for anonymous access:

   htpasswd -bBc /local_registry/auth/<$PASSWORD_FILENAME> <$USERNAME> <$PASSWORD>

   # Example:
   htpasswd -bBc /local_registry/auth/htpd user01 admin123

Expose the registry on port 5000. Add this rule to firewalld to open the port and make it available:

   firewall-cmd --zone=public --permanent --add-port=5000/tcp
   firewall-cmd --reload

Create the container to use as the local registry.
In this example, podman is used to create the container; however, you can use any container utility that you prefer:

   podman run -d --name <$REGISTRY_NAME> -p <$PORT>:<$PORT> \
     -v <$DATA_DIRECTORY>:/var/lib/registry:z \
     -v <$AUTH_DIRECTORY>:/auth:z \
     -v <$CERT_DIRECTORY>:/certs:z \
     -e "REGISTRY_AUTH=htpasswd" \
     -e "REGISTRY_AUTH_HTPASSWD_REALM=<$REALM_NAME>" \
     -e "REGISTRY_HTTP_SECRET=<$PHRASE_FOR_SECRET>" \
     -e "REGISTRY_AUTH_HTPASSWD_PATH=<$PATH_TO_AUTH_FILE>" \
     -e "REGISTRY_HTTP_TLS_CERTIFICATE=<$PATH_TO_CERT_FILE>" \
     -e "REGISTRY_HTTP_TLS_KEY=<$PATH_TO_KEY_FILE>" \
     <$REGISTRY_IMAGE>

   # Example:
   podman run -d --name local-registry -p 5000:5000 \
     -v /local_registry/data:/var/lib/registry:z \
     -v /local_registry/auth:/auth:z \
     -v /local_registry/certs:/certs:z \
     -e "REGISTRY_AUTH=htpasswd" \
     -e "REGISTRY_AUTH_HTPASSWD_REALM=my-local-registry" \
     -e "REGISTRY_HTTP_SECRET=ALongRandomSecretForLocalRegistry" \
     -e "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpd" \
     -e "REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt" \
     -e "REGISTRY_HTTP_TLS_KEY=/certs/domain.key" \
     docker.io/library/registry:2

Use curl to access the registry and test that it is up and running:

   curl -u <$USERNAME>:<$PASSWORD> -k -X GET https://$(hostname -f):5000/v2/_catalog

   # Example:
   curl -u user01:admin123 -k -X GET https://local-registry.example.com:5000/v2/_catalog

(Required) Download the images. To download the images, refer to Using the Air Gap Utility for information about pulling HPE Ezmeral Unified Analytics Software images into the local registry.
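Beyond the curl check above, you can confirm that pushes work end to end before mirroring the full image set; a sketch, reusing the example registry name and credentials from the previous steps:

   podman login local-registry.example.com:5000 -u user01 -p admin123
   podman pull docker.io/library/alpine:latest
   podman tag docker.io/library/alpine:latest local-registry.example.com:5000/test/alpine:latest
   podman push local-registry.example.com:5000/test/alpine:latest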
D. (Air-Gapped Only) Apply the image registry certificate

You can configure your air-gapped registry with HTTP or HTTPS (see the previous steps). To make it accessible using the HTTPS protocol, you need to add a certificate to the registry. This certificate can be a self-signed certificate (see the previous steps) or a company-wide common certificate. The same certificate can be used for multiple registries. If there are multiple registries and all of them are configured with different certificates, the OpenShift configuration should be updated with all of the certificates. Follow this procedure to update the registry certificate on your OpenShift cluster.

1. Create a config map with all the certificates for accessing multiple registries. The following syntax shows how to create one config map with one registry and one certificate:

      kubectl create -n openshift-config cm <$REGISTRY_CONFIG_NAME> --from-file=<$REGISTRY_URL_WITHOUT_PROTOCOL>=<$CERTIFICATE_FILENAME>

   If you have more than one registry and more than one certificate, run this instead:

      kubectl create -n openshift-config cm <$REGISTRY_CONFIG_NAME> \
        --from-file=<$REGISTRY_URL_WITHOUT_PROTOCOL>=<$CERTIFICATE_FILENAME> \
        --from-file=<$REGISTRY_URL_WITHOUT_PROTOCOL>=<$CERTIFICATE_FILENAME>

      # Examples:
      kubectl create -n openshift-config cm image-registry-config --from-file=image-registry.example.com=registry.crt
      kubectl create -n openshift-config cm multiple-registry-config \
        --from-file=image-registry.example.com=registry.crt \
        --from-file=image-registry.example.com..5000=registry.crt \
        --from-file=new-image-registry.example.com=newCert.crt

2. Once the config map is available, patch the existing OpenShift config to reference it:

      kubectl patch image.config.openshift.io cluster --type merge -p '{"spec":{"additionalTrustedCA":{"name":"<$REGISTRY_CONFIG_NAME>"}}}'

      # Example:
      kubectl patch image.config.openshift.io cluster --type merge -p '{"spec":{"additionalTrustedCA":{"name":"multiple-registry-config"}}}'

E. Install the CertManager

Install the cert manager on the OpenShift cluster. The version should be higher than 1.10. To install CertManager, run:

   kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.1/cert-manager.yaml
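Before continuing, it can help to confirm that the cert-manager components came up; a minimal sketch using standard kubectl commands:

   kubectl get pods -n cert-manager
   kubectl wait -n cert-manager --for=condition=Ready pod --all --timeout=300s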
Install HPE Ezmeral Unified Analytics Software with the Installer Web UI

To install HPE Ezmeral Unified Analytics Software on OpenShift through the Installer Web UI, complete the following steps:

1. Run the installation script that was provided with the software bundle. The host on which you run this command must be connected to the internet (the Web UI image is public for the specific version of HPE Ezmeral Unified Analytics Software that you are installing) or must point to a local registry where you pre-pulled the Web UI image. Running the installation script opens the launcher that guides you through the prompts to start the Installer Web UI.

   For a connected environment, run:
      ./start_ezua_installer_ui.sh

   For an air-gapped environment, run the following command and provide the URL of the image repository that you configured as a prerequisite:
      ./start_ezua_installer_ui.sh --image <$PRIVATE_REGISTRY>/us.gcr.io/mapr-252711/hpe-ezua-installer-ui

2. Copy the OpenShift kubeconfig to the UI installer container. The UI installer is a container that accesses the OpenShift cluster via kubectl commands, so you must give the UI installer container kubectl access to the OpenShift cluster. In a connected environment, you can download the OpenShift kubeconfig from the OpenShift console. In an air-gapped environment, use the kubeconfig that was generated during installation. Once you have the kubeconfig, run the following command to place it in the container running the HPE Ezmeral Unified Analytics Software Web UI Installer (located at ~/.kube/config):

      docker cp <$PATH_TO_KUBECONFIG> <$CONTAINER_ID>:/root/.kube/config

3. Update the hosts entries in the Web UI Installer so it can reach the OpenShift cluster.

   In a connected environment, you can find the hosts entries of your OpenShift cluster in the OpenShift console. In the OpenShift console, go to Clusters on the left and then select the cluster on which you are installing HPE Ezmeral Unified Analytics Software. Under the Installation Progress card, click Not Able to Access the Web Console?. In the dialog that opens, copy the list of hosts.

   (Screenshot: the OpenShift console showing the hosts of an example OpenShift cluster.)

   In an air-gapped environment, copy the DNS entries (used during installation) to the Web UI Installer:
   a. Exec into the Web UI Installer container:
      docker exec -it <$CONTAINER_ID> bash
   b. Edit the /etc/hosts file and add the host entries.

4. Navigate back to the launcher that opened when you ran the installation script to start the Installer Web UI.
5. Select Install in the OpenShift tile.
6. On the OpenShift Setup screen, upload your OpenShift kubeconfig and then click Next.

See Installing on User-Provided Hosts (Connected and Air-gapped Environments) to continue installation, starting with Installation Details on that page.

TIP: If installation fails, you can access the Installer Web UI logs in the live container at /root/ezua-installer-ui/log.
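After copying the kubeconfig and updating the hosts entries, you can confirm that the Web UI Installer container can actually reach the OpenShift cluster; a sketch, where <$CONTAINER_ID> follows the same placeholder convention as the steps above:

   # Should list the OpenShift nodes if kubeconfig and name resolution are correct
   docker exec -it <$CONTAINER_ID> kubectl get nodes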
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Installation/install-openshift.html",
"title": "Installing HPE Ezmeral Unified Analytics Software on OpenShift"

HPE Ezmeral Unified Analytics Software Service Activation and Billing Processes

Provides post-installation steps required to activate HPE Ezmeral Unified Analytics Software in connected and air-gapped environments.

When you install HPE Ezmeral Unified Analytics Software through the installation wizard, you have the option to install in a connected environment or an air-gapped environment. The activation and billing processes differ for each type of installation. In a connected environment, billing is an automated process. In an air-gapped environment, the billing process is manual and requires an activation code in addition to an activation key. After you install and deploy HPE Ezmeral Unified Analytics Software, the system provides you with a URL to access Unified Analytics.
The first time you go to the URL, the system prompts you for an activation key (and an activation code for air-gapped environments) to activate the product. The following sections provide the information needed to get the activation key and activation code (for air-gapped environments). When you have those, you can return to the Unified Analytics URL and enter the activation key to activate Unified Analytics.

IMPORTANT: HPE Ezmeral Unified Analytics Software services only work with a valid activation key and activation code (for air-gapped environments). Services are deactivated if the activation key and/or activation code become invalid, for example, if contractual obligations are not met.

"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Installation/service-activation.html",
"title": "HPE Ezmeral Unified Analytics Software Service Activation and Billing Processes"

Service Activation and Billing in Connected Environments
Provides information for administrators about HPE Ezmeral Unified Analytics Software activation and billing in a connected environment, including activation steps.

An administrator needs the following information to activate Unified Analytics in a connected environment:

- Platform ID: Unique, system-generated ID assigned to the Ezmeral Coordinator instance during installation. The ID is displayed when you go to the Unified Analytics URL provided after installation.
- Activation key: The license file that the administrator uploads to complete the installation of Unified Analytics. The administrator can download the activation key in the MY HPE SOFTWARE CENTER customer portal. The activation key file is a signed XML file.

Service activation and billing in connected environments is mostly automated.
The only manual process the administrator performs is going to MY HPE SOFTWARE CENTER, downloading the activation key file, and then uploading the file into Unified Analytics to activate the product. The activation key is valid for the length of the contract, typically one, three, or five years, unless the contract is made invalid, for example by product cancellation or failure to meet the contractual agreement.

To activate Unified Analytics, an administrator completes the following steps:

1. Install and deploy Unified Analytics. For connected environments, select the Connected option during installation. The system provides the URL to access Unified Analytics.
2. Go to the Unified Analytics UI URL provided. The window displays a Platform ID and requests an activation key. You cannot proceed with activation until you provide the activation key file.
3. Copy the unique Platform ID.
4. After purchasing HPE Ezmeral Unified Analytics Software, the activation key is made available to you through the Activate your products button in the HPE Subscription Electronic Receipt email that you receive from HPE. This receipt directs you to MY HPE SOFTWARE CENTER, where you can activate your product.
5. On the Activate EON page, enter the Platform ID (copied in step 3) in the Platform ID field.
6. Once activation is completed, download the Unified Analytics activation key file.
7. Return to the Unified Analytics URL and upload the activation key file.

Billing Process in Connected Environments

When the activation key is uploaded, the cluster registers with the HPE billing service. Consumption data is uploaded to the HPE billing service on an hourly basis and is based on the vCPU used by applications every hour.

"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Installation/connected-service-activation.html",
"title": "Service Activation and Billing in Connected Environments"

Service Activation and Billing in Air-Gapped Environments
Provides information for administrators about HPE Ezmeral Unified Analytics Software activation and billing in an air-gapped environment, including activation steps.

An administrator needs the following information to activate Unified Analytics in an air-gapped environment:

- Platform ID: Unique, system-generated ID assigned to the Ezmeral Coordinator instance during installation.
  The ID is displayed when you go to the Unified Analytics URL provided after installation.
- Activation key: The license file that the administrator uploads to complete the installation of Unified Analytics. The administrator can download the activation key in the MY HPE SOFTWARE CENTER customer portal. The activation key file is a signed XML file.
- Activation code: A unique code that HPE Ezmeral Customer Support gives to the administrator every 30 days to keep clusters in an active state. The code is automatically deactivated after 45 days (which includes a 15-day grace period). The activation code file is a signed JSON file. See Billing Process in Air-Gapped Environments and Renewing the Activation Code.

Service activation and billing in an air-gapped environment requires an activation key file and an activation code. The activation code must be renewed on a monthly basis (every 30 days). See Billing Process in Air-Gapped Environments.

Getting the Activation Key File and Activation Code

To get the activation key:

1. Install and deploy Unified Analytics. For air-gapped deployments, select the Air-Gapped option during installation. The system provides the URL to access Unified Analytics.
2. Go to the Unified Analytics URL provided. The window displays a Platform ID and requests an activation key and activation code. You cannot proceed with activation until you provide the activation key file and activation code.
3. Copy the unique Platform ID.
4. After purchasing HPE Ezmeral Unified Analytics Software, the activation key is made available to you through the Activate your products button in the HPE Subscription Electronic Receipt email that you receive from HPE. This receipt directs you to MY HPE SOFTWARE CENTER, where you can activate your product.
5. On the Activate EON page, enter the Platform ID (copied in step 3) in the Platform ID field.
6. Once activation is completed, download the Unified Analytics activation key file.
7. Return to the Unified Analytics URL and upload the activation key file.

To get the first activation code to activate Unified Analytics:

1. Open a support case at https://support.hpe.com using the account you have on the HPE Support Center customer portal. The support ticket must include the following information:
   - Activation key
   - Platform ID
   - Cluster ID

   TIP: This is the same portal that you would use to create any kind of ticket related to your platform. If you do not have an account, you can create one for free. When you create an account, you must link your support contract to the account. If you have never used the customer portal, refer to the KB article to help you get your support portal account up and running.

2. When support notifies you that the activation code is available in your customer portal, go to the portal and get the code.
3. Return to the Unified Analytics URL and upload both the activation key and activation code files.

Billing Process in Air-Gapped Environments

Contracts for air-gapped installations must be validated with an activation code on a monthly basis. The Unified Analytics cluster securely stores billing data. The Unified Analytics site administrator must download the billing data at the end of the billing cycle and then open an HPE Support Center customer support ticket to renew the activation code.
The support ticket that the administrator opens must include the following information:
- Billing data (downloaded from the Billing tab in Unified Analytics)
- Cluster ID

HPE Support Center renews the certificate and credentials through the billing and registration system and then uploads the new activation code to your customer portal. This cycle continues on a monthly basis to keep clusters active. Failure to adhere to this process can result in cluster deactivation or service disruption. Unified Analytics provides regular updates and reminder alerts on the product screen.

Renewing the Activation Code

To get a new activation code (every 30 days), complete the following steps:

1. Sign in to Unified Analytics.
2. In the left navigation bar, select Administration > Settings.
3. Click the Billing tab.
4. On the Billing tab, download the billing data for the current billing cycle.
5. Open a support case at https://support.hpe.com using the account you have on the HPE Support Center customer portal and include the following information:
   - Cluster ID
   - Billing data file
6. When support updates the ticket, go to your customer portal to get the new activation code.
7. Return to Unified Analytics and enter the activation code in the Activation Code field on the Billing tab.

IMPORTANT: Failure to complete these steps monthly can result in access to the Unified Analytics applications and services being disabled.

"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Installation/airgapped-service-activation.html",
"title": "Service Activation and Billing in Air-Gapped Environments"

Using the Air Gap Utility
Describes how to use the Air Gap Utility to download files in an air-gapped HPE Ezmeral Unified Analytics Software environment.

IMPORTANT: The README.txt file included with the product provides instructions for downloading and extracting the HPE Ezmeral Unified Analytics Software binaries that are required to install the product, including the Air Gap Utility. If you downloaded and extracted the files as described in the README.txt file, you should have the Air Gap Utility.

Requirements

The Air Gap Utility has the following requirements:

- Python: 2.7, or 3.6 and above
- Operating system, at minimum: RHEL 8, SLES 15, or Rocky Linux 8
- Skopeo, at minimum: 0.1.40 for RHEL or Rocky Linux; 0.1.41 for SLES

About the Air Gap Utility

HPE Ezmeral Unified Analytics Software provides a utility you can use to query, filter, and download all air gap container images necessary for your environment to a local filesystem or remote registry.
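Before installing the utility, you can confirm that the Python and Skopeo requirements above are met; a minimal sketch:

   python3 --version   # needs 2.7, or 3.6 and above (use python --version for Python 2)
   skopeo --version    # RHEL/Rocky Linux: 0.1.40 or later; SLES: 0.1.41 or later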
You can install the package on any non-platform host, even outside the platform installation. Python 2.7 or Python 3.6 and greater is required for installation. To install the air gap utility package: Install Skopeo. In the CLI, enter the following: For RHEL: dnf install -y skopeo For SLES: zypper install -y skopeo Install the hpeairgaputil package: PIP2: pip install hpeairgaputil-1.5.3-py2.py3-none-any.whl PIP3: pip3 install hpeairgaputil-1.5.3-py2.py3-none-any.whl TIP To uninstall hpeairgaputil, use: PIP2: pip uninstall hpeairgaputil PIP3: pip3 uninstall hpeairgaputil Using Air Gap Utility Filters After installing the air gap utility package, you can filter the available apps for a given HPE Ezmeral Unified Analytics Software version in a project. You must provide one of the following mandatory arguments in each of your commands: --list_releases --release TIP To display a list of options available in the ezua-airgap-util, use the following command: ezua-airgap-util --help You can use filters to display the following information: NOTE The system output in the following examples is for illustration only, and might not represent the software available for your release of HPE Ezmeral Unified Analytics Software . Release: List all releases with the following command: ezua-airgap-util --list_releases Images: List all the images for a particular release: ezua-airgap-util --release <release> List available images without headers: ezua-airgap-util --release <release> --noheaders List all required images: ezua-airgap-util --release <release> --required List all optional images: ezua-airgap-util --release <release> --optional List components: List all the components that are available for a particular release: ezua-airgap-util --list_components --release <release> Component: List all images for a particular component: ezua-airgap-util --release <release> --component <component> Size: Valid values include b, kb, mb, and gb. Display images less than a certain size: ezua-airgap-util --release <release> --lessthan 1mb Display images greater than a certain size: ezua-airgap-util --release <release> --greaterthan 5gb Display images between two sizes: ezua-airgap-util --release <release> --lessthan 6gb --greaterthan 5gb You can combine filters to provide a more customized query, for example: ezua-airgap-util --release <release> --component falco To filter for a specific name or string, use --noheaders with grep: ezua-airgap-util --release <release> --noheaders | grep <string> Downloading Air Gap Files After Using Air Gap Utility Filters to find the necessary files for your deployment, download the files as follows: Use a single command to filter and copy air gap files to a local filesystem or remote registry. Include all filters you want to apply to your download. Include --dest_compress to compress the files and download in a .tgz file. Otherwise, the files download in a .tar file. For example: ezua-airgap-util --release <release> --lessthan 1mb --copy --dest_path images/ --dest_compress Use --force to delete the .tgz or .tar file of the image if it already exists. For example: ezua-airgap-util --release <release> --lessthan 1mb --copy --dest_path images/ --force ezua-airgap-util --release <release> --lessthan 1mb --copy --dest_path images/ --dest_compress --force To copy multiple images to a local filesystem, run the following command. Provide the destination path where you want to store your files: ezua-airgap-util --release <release> --copy --dest_path <path> To copy a single image to a local filesystem, execute the following command.
Provide the destination path where you want to store your files: ezua-airgap-util --release <release> --image <image> --copy --dest_path <path> To copy multiple images to a remote container registry, select one of the following options. Provide the destination URL and credentials for your container registry. Use the --dest_creds command line option: ezua-airgap-util --release <release> --copy --dest_url <url> --dest_creds <username>:<password> Alternatively, set the environment variable AIRGAP_UTIL_CREDS. You can set environment variables using the export command: export AIRGAP_UTIL_CREDS=<username>:<password> To copy a single image to a remote container registry, execute the following command. Provide the destination URL and credentials for your container registry: ezua-airgap-util --release <release> --image <image> --copy --dest_url <url> --dest_creds <username>:<password> Air Gap Utility Logging By default, the Air Gap Utility creates a logs/ directory in the present working directory from which you invoked the Air Gap Utility command line. You can change the log directory location as follows: If you pass the --logdir argument in the Air Gap Utility command line, then the Air Gap Utility creates a logs/ directory in the path provided in the --logdir argument. If you set the AIRGAP_UTIL_LOGDIR environment variable, but do not pass the --logdir argument in the Air Gap Utility command line, then the Air Gap Utility creates a logs/ directory in the path set in the AIRGAP_UTIL_LOGDIR environment variable. NOTE The Air Gap Utility does not create log files when commands are run in non-TTY mode, that is, when output is piped. For example: ezua-airgap-util --release v1.3.0 | grep -i airflow Using Skopeo --options with the Air Gap Utility This section describes how to use Skopeo --options with the Air Gap Utility and provides usage examples. The following examples show the Skopeo --preserve-digests and --retry-times options used with the Air Gap Utility: ezua-airgap-util --release v1.3.0 --image longhornio/livenessprobe:v2.9.0 --copy --dest_path ezua-v1.3.0/ --options=\"--preserve-digests\" ezua-airgap-util --release v1.3.0 --image longhornio/livenessprobe:v2.9.0 --copy --dest_path ezua-v1.3.0/ --options=\"--retry-times 5\" You can use multiple Skopeo options with the Air Gap Utility. The following example demonstrates how to use the Skopeo --preserve-digests and --retry-times options together: ezua-airgap-util --release v1.3.0 --image longhornio/livenessprobe:v2.9.0 --copy --dest_path ezua-v1.3.0/ --options=\"--preserve-digests --retry-times 5\"", + "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Installation/airgap-utility.html", + "title": "Using the Air Gap Utility" + },
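To make the filter-and-copy workflow above concrete, here is a minimal sketch of an end-to-end session. The release value v1.3.0 comes from the examples above; the mirror directory, registry URL, and credentials are illustrative assumptions, not values from the docs:

```
# List known releases, then preview the required images before downloading anything.
ezua-airgap-util --list_releases
ezua-airgap-util --release v1.3.0 --required

# Mirror the small images (< 1mb) to a local directory as compressed .tgz files.
ezua-airgap-util --release v1.3.0 --lessthan 1mb --copy --dest_path images/ --dest_compress

# Push all images for one component straight to a private registry.
# AIRGAP_UTIL_CREDS can be used instead of --dest_creds (registry and creds are placeholders).
export AIRGAP_UTIL_CREDS=mirror-user:mirror-password
ezua-airgap-util --release v1.3.0 --component falco --copy --dest_url registry.example.com/ezua
```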
+ { + "content": "\nPorts Used by HPE Ezmeral Unified Analytics Software Lists and describes the ports used by HPE Ezmeral Unified Analytics Software . Kubernetes Ports (port: purpose): 80, 443: ingress traffic into the cluster; 6443: kube-apiserver; 2379-2380: etcd; 10250: kubelet; 10248: kubelet (healthz endpoint); 10249: kube-proxy (metrics); 10256: kube-proxy (health check); 10259: kube-scheduler; 10257: kube-controller-manager; 9099: calico-node; 9100: node exporter service; 30000-32767: NodePort services. Installer Host Ports TIP The installer automatically opens ports if the firewall is disabled. 8080: installer UI.", + "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Installation/ua-ports.html", + "title": "Ports Used by HPE Ezmeral Unified Analytics Software" + },
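As one hedged example of preparing a host to expose the Kubernetes ports listed above, the sketch below assumes the hosts run firewalld; the docs do not prescribe a specific firewall tool, so adapt to whatever your hosts use:

```
# Open the Kubernetes ports from the table above (firewalld assumed).
sudo firewall-cmd --permanent --add-port={80,443,6443,10248,10249,10250,10256,10257,10259,9099,9100}/tcp
sudo firewall-cmd --permanent --add-port=2379-2380/tcp    # etcd
sudo firewall-cmd --permanent --add-port=30000-32767/tcp  # NodePort services
sudo firewall-cmd --reload
```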
+ { + "content": "\nIdentity and Access Management Describes identity and access management in HPE Ezmeral Unified Analytics Software . HPE Ezmeral Unified Analytics Software uses Keycloak as its OIDC provider for identity and access management. Keycloak secures access to HPE Ezmeral Unified Analytics Software and applications through authorization, authentication, and SSO protocols. Users authenticate to Keycloak instead of authenticating to multiple application services. The following steps describe the basic access flow for a user signing in to HPE Ezmeral Unified Analytics Software application services: A user goes to the application URL with their web browser. If the user has not yet signed in to an application in this cluster, the user's browser is redirected to a sign-in page that is managed by the cluster's Keycloak instance. The user enters their credentials (username and password) at the sign-in page. Keycloak verifies the user's credentials against those in the organization's AD/LDAP server. If the provided credentials are valid, the user's browser is redirected to the originally requested application URL. The browser receives one or more cookies. The cookies represent active sessions with Keycloak and the application. The application is (through a secure back channel) provided with an access token that encapsulates the user's authentication and their authorized roles within the cluster. The application internally uses the access token to determine the user's identity and authorization. Some applications may also use this token to communicate with other services within the cluster. Once a user signs in to HPE Ezmeral Unified Analytics Software , SSO enables the user to seamlessly switch between different authentication-requiring application services while the session is valid. For example, the user can open the Feast application without reentering their credentials. However, if the user signs out of HPE Ezmeral Unified Analytics Software and then tries to access the Feast endpoint URL, the OIDC provider (Keycloak) prompts the user to reenter their credentials.
If the browser is left idle in the main interface for more than one hour, the user is\n automatically signed out. If more than one week has passed since the user has authenticated,\n the user must re-enter credentials. Architecture The following diagram shows two access flows; one for application A and one for application\n B. Application A is an OIDC-native application that understands how to integrate with a\n provider such as Keycloak for user authentication and authorization. Application B is not an\n OIDC-native application. The auth proxy interacts with Keycloak to ensure that access to application B is only\n available to authenticated users. The proxy also provides information about user identity\n and roles to application B through HTTP headers. Note that although application A is OIDC-native, it also sits behind the auth proxy. This\n ensures that, regardless of how the application itself manages sessions and access tokens, a\n user will be immediately blocked from accessing the application if an admin has revoked the\n user's cluster access. The following sections describe the components in the access flow diagram: Ingress Istio provides the service mesh, request routing, policy enforcement, and the\n proxies used to intervene in service requests. The Istio Ingress gateway performs TLS termination for all incoming traffic and\n validates JSON Web Tokens (JWTs) issued by Keycloak. External client access to\n application services is TLS-terminated at the Istio Ingress gateway, then routed to\n internal service endpoints with mutual TLS encryption. Internal service\n communications also use TLS. Communication to internal services (from the gateway or from applications) is\n policy-restricted to a set of allowed clients. The clients are identified by SPIFFE\n credentials. Istio and SPIRE manage the SPIFFE credentials. Routing Istio routes traffic from the Ingress gateway to the appropriate application\n service based on the DNS name destination of the traffic. During HPE Ezmeral Unified Analytics Software installation,\n the administrator can set up a DNS domain that includes the entire sub-domain DNS\n (sub-domain wild card A record) to route all domain traffic to the Ingress of the\n application environment. Auth Proxy (Oauth2 Proxy) Oauth2 Proxy gates access to applications that are not OIDC aware. It gives those\n applications information about the user's token and claims in the token by inserting\n header values (individual claim values as well as the entire token). The primary\n header values populated by the proxy are: Authorization, from \"Bearer\" prefixed to the entire token in JSON Web Token\n (JWT) format X-Auth-Request-Preferred-Username, from the preferred_username claim X-Auth-Request-Email, from the email claim X-Auth-Request-Groups, from the groups claim (Some additional headers are populated with the same username and groups values\n for backwards compatibility reasons.) Oauth2 Proxy is also used with OIDC-native apps in order to promptly and\n universally enforce administrative revocation of user access. Oauth2 Proxy hooks into application traffic through Istio authorization policies.\n The Istio authorization policy forces traffic to go through the proxy before\n accessing services in HPE Ezmeral Unified Analytics Software . OIDC Client An OIDC client provides a set of API endpoints used for interactions with the OIDC\n provider, such as authenticating users. 
The OIDC client instance used by browser-accessed applications in an HPE Ezmeral Unified Analytics Software cluster is\n represented by the ID ua and a unique generated secret. This secret\n is passed to application installation scripts during initial setup, then stored in a\n Kubernetes secret for later use in deploying applications that you import into HPE Ezmeral Unified Analytics Software . For any OIDC-native application that integrates with this OIDC client, Keycloak\n must be configured to be aware of an application-specific \"callback URL\" that will\n be used as part of the OIDC flow. For applications imported after initial setup, you\n must modify Keycloak's list of allowed callback URLs using the Keycloak web\n interface or REST API. A separate OIDC client with the ID ua-grant (no client secret) is\n available, which can be used from a CLI or program to directly exchange user\n credentials for tokens. This client implements the resource owner password\n credentials grant flow, or what Keycloak documentation calls Direct Access\n Grant . The ua-grant OIDC client is used for two main purposes, both of\n which apply to REST APIs (or other non-browser service endpoints) exposed to\n out-of-cluster users: If the service requires token-based authentication, the out-of-cluster caller\n can use the ua-grant client to obtain a token which is then\n provided to the service. Note that it is the caller's responsibility to securely\n store and otherwise manage the token. If the service requires username/password authentication, perhaps because of\n constraints from existing service clients, the service can use the ua-grant client internally to validate the user and also\n obtain a token that can be used to communicate with other cluster services. OIDC Provider (Keycloak) Keycloak sources user information from the internal or external AD/LDAP directory.\n Keycloak imports user data from the AD/LDAP server on an hourly basis. The following\n user attributes are mapped from the AD/LDAP server to Keycloak: username email full name NOTE The specific attribute names representing these three items are provided in the\n AD/LDAP configuration details when the HPE Ezmeral Unified Analytics Software is installed. Users authenticate with Keycloak instead of authenticating with individual\n applications. Keycloak assigns a special Keycloak ID to each user and supplies\n applications with tokens in JWT format. Each token contains claims that\n describe the user's authenticated identity and other attributes. The claims mapped from the AD/LDAP user attributes, respectively, are: preferred_username email name, given_name, and family_name (The latter two formed by splitting \"name\"\n at the first space.) The token also contains a groups claim. This claim contains a list of the\n user's group memberships that are important to Keycloak or to applications.\n Currently, the only application-significant group is admin , which is present\n in the groups list if the user has been designated as an Administrator of the\n cluster. For additional information about Keycloak, including how to access the Keycloak\n Admin Web Console, refer to the Keycloak Admin Web Console section in Security . Internal/External AD/LDAP See AD/LDAP Servers and Working with Certs and the Truststore . User Management (Management Operator) An administrator manages users through the HPE Ezmeral Unified Analytics Software UI; for example, creating users and\n assigning roles. 
These operations result in the creation of custom Kubernetes resources (representing queries and user configuration) that are processed by the backend user management service. This service has credentials for the Keycloak administrative REST API, the Kubernetes API, and (if applicable) the internal LDAP server. Tasks performed by this service include: Accessing the internal LDAP server to create and delete users. Marking a user in Keycloak to enable or disable their ability to authenticate into the cluster. Assigning roles to users in Keycloak.", + "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/identity-management.html", + "title": "Identity and Access Management" + },
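The ua-grant Direct Access Grant flow described above can be exercised from a CLI to trade user credentials for a token. A minimal sketch, assuming a hypothetical Keycloak hostname (keycloak.ua.example.com) and realm name (UA); both are illustrative, and older Keycloak versions prefix the token endpoint path with /auth:

```
# Resource owner password credentials grant via the ua-grant OIDC client
# (no client secret, per the docs above). Hostname, realm, and user are placeholders.
curl -s https://keycloak.ua.example.com/realms/UA/protocol/openid-connect/token \
  -d grant_type=password \
  -d client_id=ua-grant \
  -d username=hsimpson \
  -d password='user-password'
# The JSON response includes an access_token (a JWT) that the caller can present
# as "Authorization: Bearer <token>" to token-authenticated service endpoints.
```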
+ { + "content": "\nUser Isolation Describes user isolation in HPE Ezmeral Unified Analytics Software . When an HPE Ezmeral Unified Analytics Software administrator adds a new user to HPE Ezmeral Unified Analytics Software , the system automatically assigns each user a user-designated workspace. User-designated workspaces isolate each user's applications and objects from other users in the cluster. If a user wants to share their work, they can do so by setting access controls directly on the objects they create or by changing the namespace in which their applications run. HPE Ezmeral Unified Analytics Software bundles applications with different isolation mechanisms and assurances. For example, HPE Ezmeral Unified Analytics Software bundles cloud-native applications and open-source web applications. Cloud-native applications such as Kubeflow use namespaces to isolate users, whereas web applications such as open-source Airflow and Superset require customized changes to the open-source code to support user isolation and roles in HPE Ezmeral Unified Analytics Software . Customization entails mapping the HPE Ezmeral Unified Analytics Software user role (member or admin) to permissions in the open-source applications.
The following summarizes user isolation in HPE Ezmeral Unified Analytics Software with regard to HPE Ezmeral Unified Analytics Software user roles (admin and member) and application permission mappings, as well as the effect of changing user roles and deleting users on applications and objects: Admin: MLflow: assumes admin role; View/Edit access on all experiments; does not have personal models or experiments. Airflow: assumes admin role; View/Edit access on all DAGs; does not have personal DAGs. Superset: assumes admin role; View/Edit access on all dashboards, datasets, and charts; does not have personal dashboards. Spark: N/A (no role hierarchy in Spark); can only view/access personal Spark jobs. Member: MLflow: assumes member role; can only view/access personal experiments; no access to other users' experiments and models. Airflow: assumes custom role (segregated); must explicitly define own role when creating DAGs to keep them private, otherwise DAGs are shared. Superset: assumes customized AlphaDbAccessed role with added permissions to create database connections; can view all dashboards and create charts based on all dashboards; cannot edit the dashboards. Spark: N/A (no role hierarchy in Spark; similar to Kubeflow); can only view/access personal Spark jobs. Running in user namespace: MLflow: N/A; Airflow: yes; Superset: N/A; Spark: yes. User role propagation: MLflow: yes; Airflow: yes; Superset: yes; Spark: N/A (no role hierarchy in Spark). User deletion: MLflow: objects remain untouched, only admins have access; Airflow: DAGs remain untouched, only admins have access; Superset: objects remain untouched, only admins have access; Spark: jobs are removed with the user namespace. IMPORTANT Do not modify user roles or permissions in the applications that users access through HPE Ezmeral Unified Analytics Software . Modifying roles or permissions directly in an application can break the mapping between the HPE Ezmeral Unified Analytics Software user role and application permission setting. For example, do not assign an HPE Ezmeral Unified Analytics Software member the Admin role in the Superset application. If you want a user to have admin-level permissions in Superset, add the admin role to the user directly in HPE Ezmeral Unified Analytics Software . Changing a user's role to admin in HPE Ezmeral Unified Analytics Software grants the user access to the Administration settings in HPE Ezmeral Unified Analytics Software . To edit a user role, see Adding Users . The following topics describe user isolation in more detail for each of the applications that currently support user isolation: Defining RBACs on MLflow Experiments, Defining RBACs on DAGs, Defining RBACs in Superset, Running Spark Applications in Namespaces.", + "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/user-isolation.html", + "title": "User Isolation" + },
+ { + "content": "\nUser Roles Describes roles that you can assign to users in HPE Ezmeral Unified Analytics Software . In HPE Ezmeral Unified Analytics Software , a user is either a member or an administrator. The user that installs HPE Ezmeral Unified Analytics Software and applies the license is the platform administrator.
After applying the license, the administrator is prompted to sign in using the credentials entered during installation. Once signed in, the administrator can add users. See Adding and Removing Users . Any user added and assigned the admin role can also add and remove users. Users that are not assigned the admin role are members . Members have access to all areas of HPE Ezmeral Unified Analytics Software except for the Administration area. NOTE Admins can only add users that are in the AD/LDAP server. The platform administrator configures AD/LDAP settings for HPE Ezmeral Unified Analytics Software during installation. For additional information, see Installation and AD/LDAP Servers . Administrators Only users assigned the admin role can see and use the Administration area in the left navigation bar. The following list describes the tasks that an admin can perform through the Administration options and provides links to additional information: Settings Upload the activation key and activation code to activate services. See HPE Ezmeral Unified Analytics Software Service Activation and Billing Processes . Update application container images. See Upgrading Included Frameworks . Register an Otel endpoint or view the JDBC endpoint. See Configuring Endpoints and Connect to External Applications via JDBC . Identity & Access Management Add and remove users. See Adding and Removing Users . Data Fabric Connect to HPE Ezmeral Data Fabric clusters. See Connecting to External HPE Ezmeral Data Fabric Clusters . Audit Logs View a chronological set of records that document the events that occur in an HPE Ezmeral Unified Analytics Software cluster. See Audit Logging .", + "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/user-roles.html", + "title": "User Roles" + },
+ { + "content": "\nAD/LDAP Servers Describes the differences between the internal OpenLDAP server in HPE Ezmeral Unified Analytics Software and external AD/LDAP servers. Also describes some of the server-related configuration options that you set during installation. When you install HPE Ezmeral Unified Analytics Software , the configuration options vary depending on whether you use the internal OpenLDAP server (default) included with HPE Ezmeral Unified Analytics Software or an external AD/LDAP server. After installation, the designated administrator can sign in and grant users permission to access HPE Ezmeral Unified Analytics Software and assign roles. A user management operator running in HPE Ezmeral Unified Analytics Software sets up local resources for users, such as their user profile and workspace, and also enables access. NOTE SSO does not support applications that use AD/LDAP integration to validate credentials presented to an external service. The AD/LDAP server supports access by PLAIN (unsecured) LDAP, LDAPS, or StartTLS. Do not use PLAIN LDAP in production.
If using LDAPS or StartTLS, one or more custom certificates may be needed to validate the server certificate. See Working with Certs and the Truststore . The following sections describe the differences between internal and external AD/LDAP servers: Internal OpenLDAP Server In HPE Ezmeral Unified Analytics Software , the internal directory setup is an OpenLDAP server. Only use the internal directory for POCs and demos; do not use the internal directory in production. If you opt to use the internal directory, during installation you specify the following information to create the administrator in the system: username full name email password The administrator is the initial user that signs in to HPE Ezmeral Unified Analytics Software to add other users and perform administrative tasks. Adding users creates the internal user directory. When you remove a user, the user can no longer access the HPE Ezmeral Unified Analytics Software cluster, and the system clears the local resources. See Adding and Removing Users . External AD/LDAP Server If you select Use External LDAP Server during installation and configure an external directory, HPE Ezmeral Unified Analytics Software references the external AD/LDAP server and gets users from it. When you sign in to HPE Ezmeral Unified Analytics Software , you can search for users, grant access, and assign roles. HPE Ezmeral Unified Analytics Software has the following external AD/LDAP server requirements: The AD/LDAP server must already exist. The AD/LDAP server must be network-accessible to the deployed HPE Ezmeral Unified Analytics Software instance. For AWS deployments, the AD/LDAP server must be accessible to the VPC where the HPE Ezmeral Unified Analytics Software instance runs. The AD/LDAP server must contain user objects with the required attributes. Any addition, removal, or modification of users and their attributes must be done at the AD/LDAP server. The user objects on the external AD/LDAP server must have the following attributes: Username Fullname Email UID GID Group GID These attributes are required to federate users from the AD/LDAP server to Keycloak. User objects obtained from the direct AD/LDAP integration do not contain any role assignments and do not indicate which users are enabled to use HPE Ezmeral Unified Analytics Software . When you configure the external AD/LDAP server during installation, you specify the following information: How to contact the LDAP server. How to bind to the server to find account information. Truststore for validating the server certificate. Information about how user objects are configured. The following list describes some of the AD/LDAP fields that you configure during installation: Active Directory: If you do not select the Active Directory (AD) option, the possible schemas are more varied. You must enter additional information to properly describe the user and group objects. Validation: The validation check boxes are for sanity checks before the installation starts and during the installation process. The validation can detect issues with the AD integration server before the installation is well underway. Only disable these options when running the installation container in an environment that cannot access AD. Search Base DN: Must cover both user and group objects. Security Protocol: If the security protocol is LDAPS or StartTLS, the server certificate will be validated. If the server certificate was signed by something other than a known public CA, a truststore must be provided. A truststore is a JKS file such as those created by the Java keytool utility. If a provided truststore is password protected, the truststore password must be supplied. Username Attribute: Must contain the name of a user object attribute on the server that contains a username following some content rules: starts and ends with a lowercase letter; contains only lowercase letters and dashes, for example \"hsimpson\" and not homer.simpson@mycompany.com. Fullname Attribute: Must contain the name of a user object attribute on the server that contains the user's full name. This is typically the name attribute on AD servers or cn on OpenLDAP servers. Email Attribute: Must contain the name of a user object attribute on the server that contains the user's email address. Each user must have a unique email address. This is typically the mail attribute on AD or OpenLDAP servers. UID Attribute, GID Attribute: For non-AD servers, these fields name user object attributes that are expected to contain an integer user ID or group ID value; similarly, the group GID attribute names the corresponding attribute on group objects. Default Admin User: Must identify a user that already exists on the server. The value specified here should be the value of the Username Attribute on that user object.", + "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/ad-ldap.html", + "title": "AD/LDAP Servers" + },
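Before installation, it can help to confirm that user objects on the external server actually expose the required attributes. A minimal ldapsearch sketch, assuming AD-style attribute names and an illustrative bind DN, search base, and user (substitute the values for your server):

```
# Check that a user entry exposes username, full name, email, UID, and GID attributes
# (sAMAccountName/name/mail are AD conventions; adjust, e.g., to uid/cn/mail for OpenLDAP).
ldapsearch -H ldaps://myserver.com:636 \
  -D 'cn=readonly,dc=mycompany,dc=com' -W \
  -b 'dc=mycompany,dc=com' \
  '(sAMAccountName=hsimpson)' \
  sAMAccountName name mail uidNumber gidNumber
```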
+ { + "content": "\nWorking with Certs and the Truststore Describes how to provide a truststore with a valid server certificate, including how to view and locate certs, as well as how to create and validate a truststore for certs. When you use an external AD/LDAP server, Keycloak verifies the server certificate. If the certificate is not signed by a commonly known certificate authority (CA), you must provide a truststore with the information required to verify that the server certificate is valid. Note the following guidelines and conditions related to certs and the truststore: A truststore is needed for StartTLS or LDAPS connections. Different applications with potentially different default trusted-certificate stores may need to verify the connection, so the means to verify the LDAP server's certificate must be explicitly provided. A truststore contains the certs required to finish the signing chain, from the issuing cert mentioned in the cert that the server presents to the trusted self-signed cert. In the case of a self-signed server cert, the chain is the server cert itself.
Any truststore that you provide must contain all of the necessary certs. The truststore must be a chain of certs signed by certs that terminate in a self-signed cert. Locating the Certs to Put in the Truststore If you do not know which certs need to go into the truststore (possibly due to IT protocols), use openssl to probe the server to see which certs are being presented by the AD/LDAP server. The following commands create the files myserver-cert1.pem, myserver-cert2.pem , and so on. These are the certs presented by the server. The first one is the server cert, followed by any intermediate certs. LDAPS server The following command probes an LDAPS server running at myserver.com on port 636 : openssl s_client -showcerts -verify 10 -connect myserver.com:636 < /dev/null | awk '/BEGIN/,/END/{if(/BEGIN/) {a++}; out=\"myserver-cert\"a\".pem\"; print >out}' StartTLS server The following command probes a StartTLS server running at myserver.com on port 389 : openssl s_client -showcerts -verify 10 -connect myserver.com:389 -starttls ldap < /dev/null | awk '/BEGIN/,/END/{if(/BEGIN/) {a++}; out=\"myserver-cert\"a\".pem\"; print >out}' Getting the Issuer and Subject from a cert file To get the Issuer and Subject from a cert file, run the following command: openssl x509 -in myserver-cert1.pem -text | grep '\\(Issuer\\|Subject\\)' TIP If there is only one cert and it refers to itself as Issuer, that means it is a self-signed server cert, and that server cert needs to go into the truststore. If there is a list of Issuer certs, there is typically one Issuer that does not have a match among the Subjects. That missing Issuer cert is the next link in the trust chain. You will need to get that cert either by way of the CA, your IT department, or whoever configured and runs the server. Often the missing Issuer is a custom root cert, in which case you only have one cert to put in your truststore. If the missing Issuer is not a root cert and is actually an intermediate cert, you will need to get the intermediate cert, get the cert that the intermediate cert is signed by, and continue this process until you get to the root (self-signed) cert. Creating a Truststore Build the truststore with the Java keytool utility by performing a series of cert imports. Note the following points from the keytool docs about accepted input cert formats: Keytool can import X.509 v1, v2, and v3 certificates, and PKCS#7 formatted certificate chains consisting of certificates of that type. The data to be imported must be provided either in binary encoding format or in printable encoding format (also known as Base64 encoding) as defined by the Internet RFC 1421 standard. In the latter case, the encoding must be bounded at the beginning by a string that starts with \"-----BEGIN\" and bounded at the end by a string that starts with \"-----END\". Importing a truststore and setting the password Alias values in the commands are used for readability when dumping the truststore. You can use any alias you choose. The first import creates the truststore. During the first import, you set the password for the truststore. Subsequent imports will ask for this password. To import a self-signed server cert from the servercert.pem file, run the following command: NOTE This is the only command you have to run for a self-signed certificate.
Creating a Truststore
Build the truststore with the Java keytool utility by performing a series of cert imports. Note the following points from the keytool documentation about accepted input cert formats:

- Keytool can import X.509 v1, v2, and v3 certificates, and PKCS#7 formatted certificate chains consisting of certificates of that type.
- The data to be imported must be provided either in binary encoding format or in printable encoding format (also known as Base64 encoding) as defined by the Internet RFC 1421 standard. In the latter case, the encoding must be bounded at the beginning by a string that starts with "-----BEGIN" and bounded at the end by a string that starts with "-----END".

Importing a truststore and setting the password
Alias values in the commands are used for readability when dumping the truststore. You can use any alias you choose. The first import creates the truststore. During the first import, you set the password for the truststore. Subsequent imports will ask for this password.

To import a self-signed server cert from the servercert.pem file, run the following command. NOTE: This is the only command you have to run for a self-signed certificate.

keytool -importcert -alias selfsigned \
  -file servercert.pem -keystore truststore.jks

When asked if you want to trust it, respond with yes.

Importing a custom root cert and intermediate certs
ATTENTION: If you follow the instructions to import down the trust chain, you should not be asked whether any of the intermediate certs should be trusted, because keytool should be aware of what cert they were signed by. If you get that question when importing an intermediate cert, you may have missed a link in the chain or you are importing in the wrong order.

To import a custom root cert and intermediate certs, start by running the following command to import the custom root cert (root.pem in this example):

keytool -importcert -alias root \
  -file root.pem -keystore truststore.jks

When asked if you want to trust it, respond with yes.

Importing intermediate certs
If you have intermediate certs to import, start with the one closest to the root, and work down the signing chain toward the server cert. If your first intermediate cert was signed by a default trusted cert, run the following command to import it (example filename intermediate.pem):

keytool -importcert -trustcacerts -alias intermediate \
  -file intermediate.pem -keystore truststore.jks

For any intermediate cert signed by something previously imported into your truststore, run the following command to import it without the trustcacerts argument:

keytool -importcert -alias intermediate \
  -file intermediate.pem -keystore truststore.jks

Validating a Truststore
Run the command appropriate for your server type and then press Enter to kill the connection. If the validation is successful, the system returns the following message:

Verify return code: 0 (ok)

If the truststore is not correct, the system returns the following message:

Verify return code: 20 (unable to get local issuer certificate)

LDAPS
The following example validates that a truststore named truststore.jks with password mypass works for an LDAPS server running at myserver.com on port 636:

openssl s_client -verify 10 -connect myserver.com:636 \
  -CAfile <(keytool -list -rfc -keystore truststore.jks -storepass mypass)

StartTLS
The following example validates that a truststore named truststore.jks with password mypass works for a StartTLS server running at myserver.com on port 389:

openssl s_client -verify 10 -connect myserver.com:389 -starttls ldap \
  -CAfile <(keytool -list -rfc -keystore truststore.jks -storepass mypass)
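Before validating against the live server, it can help to confirm what actually landed in the truststore. A quick sketch using keytool's list mode (truststore.jks and mypass are the example names from above):

# List every entry with its alias, fingerprint, and Issuer/Subject details
keytool -list -v -keystore truststore.jks -storepass mypass

Each alias you imported should appear once, and the chain of Issuers should terminate at the self-signed root.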
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/truststore.html",
"title": "Working with Certs and the Truststore"

Adding and Removing Users
Describes how administrators can add and remove users in HPE Ezmeral Unified Analytics Software.
The user search field is only enabled for HPE Ezmeral Unified Analytics Software installations configured to use an external AD/LDAP directory. The search field does not work for installations using the internal OpenLDAP configuration; however, an admin can still add new users.

For an external AD/LDAP directory, complete the following steps to add a user:
1. Sign in to HPE Ezmeral Unified Analytics Software.
2. In the left navigation bar, select Administration > Identity & Access Management.
3. In the search field, enter a substring of the user's username or email ID, and then enable HPE Ezmeral Unified Analytics access. You can also assign the admin role if you want the user to have administrative access in HPE Ezmeral Unified Analytics Software.

For the internal OpenLDAP directory, complete the following steps to add a user:
1. Sign in to HPE Ezmeral Unified Analytics Software.
2. In the left navigation bar, select Administration > Identity & Access Management.
3. Click Add User.
4. In the drawer that opens, enter the following information:
   - Username - Enter the username.
   - First Name - Enter the first name of the user.
   - Last Name - Enter the last name of the user.
   - Email ID - Enter the email ID associated with the user.
   - Password - Enter the password for the user.
   - Role - Selecting Administrator assigns the user the administrator role, which gives the user permission to act as an administrator in the HPE Ezmeral Unified Analytics Software UI. If you do not select Administrator, the user is assigned the member role.

To remove a user:
1. In the list of users, select the user you want to remove.
2. Click into the Actions column, and click the Delete option. Alternatively, click the Delete button on the screen.
3. The system prompts you to confirm the action. Once you confirm, the user is removed.

To edit the role and password for a user:
1. In the list of users, select the user you want to edit.
2. Click into the Actions column, and click the Edit option.
3. In the drawer that opens, change the password and role for the user.
4. Click Update.

"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/add-users.html",
"title": "Adding and Removing Users"
Adding and Removing Users Programmatically
Describes how to add and remove users through the Kubernetes API using the EzUserQuery and EzUserConfig custom resources.

The user management operator in HPE Ezmeral Unified Analytics Software responds to the EzUserQuery and EzUserConfig custom resources when they are created by a client with the required Kubernetes API permissions. Use the administrative kubectl config that you get when you create the HPE Ezmeral Unified Analytics Software cluster to onboard and manage users programmatically through the Kubernetes API.

To onboard a user, complete the following steps:
1. Use the EzUserQuery custom resource to search for the user in the internal or external AD/LDAP directory. The EzUserQuery returns a list of attributes for a user, including the Keycloak ID. The Keycloak ID is required to onboard a user.
2. Use the EzUserConfig custom resource to onboard the user.
The following sections describe the custom resources:

EzUserQuery
Use the EzUserQuery custom resource to query the user AD/LDAP directory.

The EzUserQuery properties map directly to the query types of the Keycloak user API. Providing values for the email, firstName, lastName, and/or username properties sets criteria that must match the returned users. The search property value is typically the most useful and can match against the email or username.

Keycloak returns the query response and the status updates. The query results are bounded, and a query only returns up to five results. Narrow your search criteria to reduce the number of results returned. Results show you attributes for the user, for example whether they are enabled (true/false), their id (Keycloak user ID), and their role (admin or not). EzUserQuery resources self-delete after they expire.

Using the EzUserQuery Custom Resource
In a YAML file, add the following properties, specifying your own values:

apiVersion: ezconfig.hpe.ezaf.com/v1alpha1
kind: EzUserQuery
metadata:
  name: my-query-1
spec:
  search: joel

To create and get the query, run the following commands, specifying your YAML file name:

kubectl create -f query.yaml

kubectl get ezuserquery

A ready status indicates that there are query results. This is the status.status property.

To query the AD/LDAP directory, run the following command, specifying your query name:

kubectl get ezuserquery my-query-1 -o yaml

In the output, the userQuery property displays the user attributes.

EzUserConfig
Use the EzUserConfig custom resource to enable/disable users and manage user roles. EzUserConfig identifies the user (via Keycloak ID) and indicates the roles that a user should have when onboarded.

The following table describes the differences between internal and external AD/LDAP servers when using EzUserConfig:

AD/LDAP Server Type   Description
Internal              EzUserConfig creates and enables a user. Deleting an EzUserConfig disables and deletes the user.
External              EzUserConfig enables a user. EzUserConfig identifies the user (via Keycloak ID) and sets the user role. Deleting an EzUserConfig disables the user.

The EzUserConfig status stanza shows user attributes, whether the user is successfully enabled, the roles that have successfully been assigned, and any error messages.

TIP: The user management operator actually onboards the user. Enabled is not a role that you can assign to a user.

To show existing ezuserconfigs, run:

kubectl get ezuserconfig

Using the EzUserConfig Custom Resource
In a YAML file, add the following properties, specifying your own values:

apiVersion: ezconfig.hpe.ezaf.com/v1alpha1
kind: EzUserConfig
metadata:
  name: my-admin-user-1
spec:
  id: 04ef844e
  roles:
  - admin

Note that the ID is the Keycloak ID that you can get using the EzUserQuery custom resource.
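Because the two resources chain together, onboarding can be scripted end to end. The following is a minimal sketch, not the documented procedure; it assumes the Keycloak ID surfaces in the query status at a path like .status.userQuery[0].id and that the ready status value is the literal string ready — verify both against your cluster's actual output before relying on it:

# Create the query and wait for its status.status to report ready
kubectl create -f query.yaml
kubectl wait ezuserquery/my-query-1 --for=jsonpath='{.status.status}'=ready --timeout=60s

# Pull the Keycloak ID out of the query status (field path is an assumption)
KC_ID=$(kubectl get ezuserquery my-query-1 -o jsonpath='{.status.userQuery[0].id}')

# Onboard the user with that ID
cat <<EOF | kubectl create -f -
apiVersion: ezconfig.hpe.ezaf.com/v1alpha1
kind: EzUserConfig
metadata:
  name: my-admin-user-1
spec:
  id: ${KC_ID}
  roles:
  - admin
EOF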
To see a list of all the attributes for a user, run the following command, specifying the name of the user you want to see attributes for:

kubectl get ezuserconfig joel -o yaml

"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/onboard-programmatically.html",
"title": "Adding and Removing Users Programmatically"
Managing Data Access

HPE Ezmeral Unified Analytics Software administrators have unrestricted access to all the data sources and underlying schemas, tables, views, and buckets. Admins can grant members the following types of access to data sources, schemas, tables, views, and buckets:

- Read
- Write
- Read & Write

Admins can grant public access to a data source such that all HPE Ezmeral Unified Analytics Software members have full access (read, write, and execute) to all the data within the data source. Alternatively, admins can grant one or more members access to specific schemas, tables, views, or buckets within a data source. Any access granted can also be revoked by an admin.

When members do not have access to a data source, the data source appears greyed out on the screen. Members cannot access any of the data within that data source. Any attempt to access the data results in an access denied error. Members should contact their HPE Ezmeral Unified Analytics Software admin to request access to data.

The following sections provide the steps for granting and revoking access to data sources, schemas, tables, and views.

Granting a Member Access to Data
HPE Ezmeral Unified Analytics Software administrators can grant a member access to one or more tables, views, or buckets in a schema. To grant a member access to data, complete the following steps:
1. Sign in to HPE Ezmeral Unified Analytics Software.
2. In the left navigation bar, select Administration > Identity & Access Management.
3. On the Identity and Access Management screen, locate the user.
4. In the Actions column of the user row, click the three-dots and select Manage Privileges.
5. On the Manage Privileges screen, select the Structured Data or Object Store Data tab, depending on the type of data that you want to grant the user access to.
6. Expand a data source and select a schema.
7. In the Datasets area, select the tables, views, or buckets that you want to grant the user access to. You can grant Read, Write, or Read & Write access.
   - If you are only granting the user access to a single table, view, or bucket, use the Access Type column dropdown in the row of the table, view, or bucket.
   - If you are granting the user access to multiple tables, views, or buckets, use the Bulk Access dropdown to the right of the Search field, and select the access type you want to grant the user on all of the selected tables, views, or buckets.
8. Click Update Privilege. The system displays the message: Updated privileges for the user:

Granting All Members Access to a Data Source (Public Access)
HPE Ezmeral Unified Analytics Software administrators can make a data source publicly accessible.
When an admin makes a data source publicly accessible, all members have full access (read and write) permissions on the data source and the data within it. To make a data source publicly accessible, complete the following steps:
1. Sign in to HPE Ezmeral Unified Analytics Software.
2. In the left navigation bar, select Data Engineering > Data Sources.
3. Select the Structured Data or Object Store Data tab.
4. In the data source tile, click the three-dots.
5. Select Change to public access.
6. In the Data Access dialog, click Proceed or Cancel. If you chose to proceed, the system displays the message: Access changed for the data source:

Revoking Member Access to Data
HPE Ezmeral Unified Analytics Software administrators can revoke a member's access to schemas, tables, views, and buckets. Revoking access makes the data inaccessible to the member. To revoke member access to data, complete the following steps:
1. Sign in to HPE Ezmeral Unified Analytics Software.
2. In the left navigation bar, select Administration > Identity & Access Management.
3. On the Identity and Access Management screen, locate the user.
4. In the Actions column of the user row, click the three-dots and select Manage Privileges.
5. On the Manage Privileges screen, select the Structured Data or Object Store Data tab, depending on the type of data that you want to revoke access to.
6. Expand the data source and select the schema that contains the data you want to revoke access to.
7. In the Datasets area, select the tables, views, or buckets that you want to revoke access to.
   - If you are only revoking access to one table, view, or bucket, select No Access in the Access Type column for the table, view, or bucket.
   - If you are revoking access to multiple tables, views, or buckets, select the tables, views, or buckets and then use the Bulk Access dropdown (to the right of the Search field) and select No Access.
8. Click Update Privilege. The system displays the message: Updated privileges for the user:

Revoking Public Access to a Data Source
HPE Ezmeral Unified Analytics Software administrators can revoke all access to a data source. Revoking public access to a data source makes the data in the data source inaccessible to all members. Only admins can access the data source and the data within it.
1. Sign in to HPE Ezmeral Unified Analytics Software.
2. In the left navigation bar, select Data Engineering > Data Sources.
3. Select the Structured Data or Object Store Data tab.
4. In the data source tile, click the three-dots.
5. Select Change to private access.
6. In the Data Access dialog, click Proceed or Cancel. If you chose to proceed, the system displays the message: Access changed for the data source:

"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/data-access-management.html",
"title": "Managing Data Access"
Expanding the Cluster
Describes how to add additional user-provided hosts to the management cluster to increase resource capacity and how to expand the cluster to include the additional user-provided hosts.

Expand the cluster when applications cannot run due to resource limitations, such as lack of vCPU. When applications do not have enough resources to run, the system raises an alarm to alert you of the issue. In such cases, the HPE Ezmeral Unified Analytics Software administrator and system administrator can work together to add additional user-provided hosts to the pool of machines in the management cluster (control plane nodes) and workload cluster to increase the processing capacity of the cluster.

The following steps outline the cluster expansion process:
1. An application triggers an alert to users that it does not have sufficient resources to run.
2. Users contact the system administrator to request additional resources (add additional user-provided hosts to the management cluster).
3. A system administrator adds user-provided hosts to the cluster, as described in the section Adding User-Provided Hosts to the Cluster.
4. After the system administrator adds user-provided hosts to the cluster, the HPE Ezmeral Unified Analytics Software administrator signs in to the HPE Ezmeral Unified Analytics Software UI and expands the cluster, as described in the section Expanding the Cluster.
Adding User-Provided Hosts to the Cluster
You can only add user-provided hosts to the cluster. User-provided hosts are machines that meet the installation prerequisites, as described in Installation Prerequisites.

TIP:
- If you want to use the high-availability (HA) feature when you expand the cluster, note that HA requires three master nodes. You must add two hosts to the ezfabric-host-pool with the controlplane role.
- If you want to increase the vCPU or vGPU resources when you expand the cluster, you must add worker hosts or GPU hosts with enough resources (vCPU or vGPU) to the ezfabric-host-pool with the worker role.

1. SSH into the VM that launched the installation UI when you installed HPE Ezmeral Unified Analytics Software. This is the VM that launched the installation UI when you ran the installation UI script (./start_ezua_installer_ui.sh), as described in Run the Installation Script to Access the Installer Web UI.
2. To run a bash session in the UI installer Docker container on the VM, issue the following command:

docker exec -it hpe-ezua-installer-ui bash

3. Create a /tmp/expand directory:

mkdir /tmp/expand
cd /tmp/expand

4. Copy the following files into the /tmp/expand directory and create an add_host.yaml file in the /tmp/expand directory:

cd /tmp/expand
cp /tmp/ezkf-orchestrator/mgmt-kubeconfig ./mgmt-kubeconfig
cp /root/ezua-installer-ui/ezfabricctl_linux_amd64 ./ezfabricctl
cp /tmp/hostPoolConfig.yaml ./add_host.yaml

5. Edit the /tmp/expand/add_host.yaml file. IMPORTANT: Do not change the defaultHostCredentials object. The sshPassword for the defaultHostCredentials must be a base64 encoded string. For example, if your password is abcde12345!, converting it to a base64 encoded string changes it to YWJjZGUxMjM0NSE= (see the sketch after this step).
   - Under the hosts: object, remove the worker and/or control plane nodes that are listed from any previous action.
   - Add the new worker and/or control plane nodes. In the following example, node 10.10.10.123 is specified as the control plane node being added to the host pool, and node 10.10.10.223 is specified as the worker node being added to the host pool:

defaultHostCredentials:
  sshUserName: root
  sshPassword: YWJjZGUxMjM0NSE=
  sshPort: 22
hosts:
- host: 10.10.10.123
  labels:
    role: controlplane
- host: 10.10.10.223
  labels:
    role: worker
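A quick sketch for producing the base64-encoded sshPassword on any Linux host, using the example password from above; the -n flag matters, since a trailing newline would change the encoding:

echo -n 'abcde12345!' | base64
# prints: YWJjZGUxMjM0NSE=

# Sanity check: decode it back
echo 'YWJjZGUxMjM0NSE=' | base64 -d
# prints: abcde12345!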
6. Make ezfabricctl executable:

chmod +x ezfabricctl

NOTE: The ezfabricctl command-line program is available in the installer UI Docker container. The installer UI invokes this program for cluster expansion.

7. To add the new node(s) to the pool, run the following command:

./ezfabricctl poolhost init --input add_host.yaml --kubeconfig mgmt-kubeconfig

8. To check node status, SSH in to the Ezmeral Coordinator. The Ezmeral Coordinator is the node that orchestrates the deployment of HPE Ezmeral Unified Analytics Software instances, as described in Node Setup. TIP: If you need to identify the Ezmeral Coordinator node, run the following command: kubectl get nodes --kubeconfig

9. To verify that the hosts were added to the hosts pool, run the following command:

kubectl get ezph -A

The system returns output similar to the following:

NAMESPACE            NAME           CLUSTER NAMESPACE   CLUSTER NAME   STATUS   VCPUS   UNUSED DISKS   GPUS
ezfabric-host-pool   10.10.10.1     ezkf-mgmt           ezkf-mgmt      Ready    4       2              0
ezfabric-host-pool   10.10.10.223                                      Ready    4       2              0
ezfabric-host-pool   10.10.10.123                                      Ready    4       2              0

NOTE: In the kubectl command that you run, ezph is the abbreviated version of ezpoolhosts, an Ezmeral resource that runs against the host pool.

Expanding the Cluster
In a user-provided host configuration, the hosts within the pool (namespace) must have enough vCPUs and vGPUs for the cluster expansion to succeed. If you request more vCPUs and vGPUs than are available, the cluster expansion will fail.

ATTENTION: If repeated attempts to expand the cluster fail with an "already complete" message, delete any existing EzkfOpsExpand custom resources on the workload cluster before you expand the cluster (a scripted version of this cleanup is sketched at the end of this topic). To identify the EzkfOpsExpand custom resources, run the following command:

kubectl get ezkfopsexpand -A
# (lists the Expand CR names and namespaces)

For each of the EzkfOpsExpand custom resources listed in the output, run the following command:

kubectl delete ezkfopsexpand -n

To expand the cluster, complete the following steps:
1. In the left navigation bar, select Administration > Settings.
2. On the Cluster tab, select Expand Cluster.
3. In the Expand Cluster drawer that opens, enter the following information:
   - Number of additional vCPU to allocate. For example, if the current vCPU is 96 and you add 4 vCPU, the vCPU increases to a total of 100 vCPU.
   - Select Use GPU if you want to use GPU and it is not already selected. If Use GPU was selected during installation of HPE Ezmeral Unified Analytics Software, this option cannot be disabled and stays selected by default.
   - Indicate the additional number of vGPU to allocate. For GPU configuration, if a size was selected during HPE Ezmeral Unified Analytics Software installation, you cannot change the size. However, if no vGPU size was selected during installation, you can select a size now. For additional information, see GPU Support.
   - If HA was selected during HPE Ezmeral Unified Analytics Software installation, you cannot disable it. If it was not selected during installation, you can select it now. Currently HA is available for the workload cluster only. You cannot set HA for the management cluster.
4. Click Expand.

Configuring HPE MLDE for Added GPU Nodes
If you add GPU nodes to the cluster after installing HPE MLDE, you must perform additional steps to ensure HPE MLDE works on these nodes. For details, see Configuring HPE MLDE for Added GPU Nodes.
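As referenced in the ATTENTION above, a minimal sketch that deletes every stale EzkfOpsExpand resource in one pass, assuming your kubeconfig points at the workload cluster:

# List all EzkfOpsExpand CRs (namespace and name), then delete each one
kubectl get ezkfopsexpand -A --no-headers | while read -r ns name _; do
  kubectl delete ezkfopsexpand "$name" -n "$ns"
done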
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/expand-cluster.html",
"title": "Expanding the Cluster"

Shutting Down an HPE Ezmeral Unified Analytics Software Cluster
Describes how to gracefully shut down an HPE Ezmeral Unified Analytics Software cluster when you want to perform maintenance or upgrade tasks.

A typical HPE Ezmeral Unified Analytics Software cluster consists of three or more nodes. Three nodes are specifically designated as data fabric nodes. If your cluster has exactly three nodes, you can simply run the edf shutdown cluster command from the admincli pod to gracefully shut down the HPE Ezmeral Unified Analytics Software cluster, as described in Shutting Down Data Fabric Nodes.
If your HPE Ezmeral Unified Analytics Software cluster has more than three nodes, the cluster consists of both data fabric and non-data fabric nodes. In this case, first identify the data fabric nodes and take note, as described in Identifying Data Fabric Nodes. Shut down the non-data fabric nodes using a standard Linux shutdown command, or a UI power down in the case of AWS, as described in Shutting Down Non-Data Fabric Nodes. Once you have shut down the non-data fabric nodes, run the edf shutdown cluster command from the admincli pod to gracefully shut down the HPE Ezmeral Unified Analytics Software cluster, as described in Shutting Down Data Fabric Nodes.

After you shut down the cluster and perform maintenance or upgrade tasks, restart the cluster, as described in Restarting an HPE Ezmeral Unified Analytics Software Cluster.

IMPORTANT: HPE Ezmeral Unified Analytics Software does not support graceful shutdown for installations that use the internal directory (OpenLDAP server). Users added to the HPE Ezmeral Unified Analytics Software internal directory (OpenLDAP server) will not persist beyond the shutdown and restart, making it impossible for any user to access the system.

Requirements
Shutting down an HPE Ezmeral Unified Analytics Software cluster requires:
- Access to the Kubernetes cluster
- Admin access in the HPE Ezmeral Unified Analytics Software cluster

Identifying Data Fabric Nodes
If the cluster consists of more than three nodes, identify the data fabric nodes. The data fabric nodes have a CLDB service running on them. The CLDB service is a proprietary data fabric service that only runs on data fabric nodes. Run the following command to identify the data fabric nodes:

kubectl get pods -n dataplatform -o wide | grep cldb

Shutting Down Non-Data Fabric Nodes
After you have identified the data fabric nodes, run a standard Linux shutdown command or UI power down in the case of AWS. For example, run the following Linux command on the non-data fabric nodes to shut them down:

shutdown [OPTIONS] [TIME] [MESSAGE]

Shutting Down Data Fabric Nodes
Use the edf shutdown cluster command to gracefully shut down the data fabric nodes and reboot pods. The edf shutdown cluster command shuts down the data fabric nodes in an ordered process to prevent certain components from restarting. Pods are immediately put into a wait state after the reboot to prevent them from becoming operational. You must run the edf shutdown cluster command from the admincli pod of the HPE Ezmeral Unified Analytics Software cluster. To log in to the admincli pod and shut down the data fabric nodes, run the following command:

kubectl exec -it admincli-0 -n dataplatform -- edf shutdown cluster

Restarting an HPE Ezmeral Unified Analytics Software Cluster
Complete the following steps to restart the cluster:
1. To log in to the admincli pod and restart the data fabric nodes, run the following command:

kubectl exec -it admincli-0 -n dataplatform -- edf startup resume

2. To check the status of the nodes, run the following command:

kubectl get pods mcs-0 -n dataplatform

When the mcs pod READY column shows 1/1, the cluster is fully operational.
3. Power up the non-data fabric nodes.
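A small sketch of the restart check, using kubectl's watch flag to stream pod status until the READY column reports 1/1 (press Ctrl-C once it does):

kubectl exec -it admincli-0 -n dataplatform -- edf startup resume
# Watch the mcs pod until READY shows 1/1
kubectl get pods mcs-0 -n dataplatform -w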
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/graceful-shutdown.html",
"title": "Shutting Down an HPE Ezmeral Unified Analytics Software Cluster"

Importing Applications and Managing the Application Lifecycle
Describes how to import, manage, and secure applications and frameworks in HPE Ezmeral Unified Analytics Software.
Administrators can import, run, and manage customized Kubernetes applications and frameworks in HPE Ezmeral Unified Analytics Software. Administrators can manage imported applications as well as the applications that were included with HPE Ezmeral Unified Analytics Software at the time of installation.

Imported and included applications appear in the Tools & Frameworks screen in HPE Ezmeral Unified Analytics Software. You can access Tools & Frameworks in the left navigation bar. A tile is displayed for each application. A yellow Imported label on a tile indicates that the application was imported.

Importing Custom Kubernetes Applications
To import a Kubernetes application, you upload a Helm chart with a tar.gz file extension and specify configuration parameters. After you import your Kubernetes applications, you can also manage them in HPE Ezmeral Unified Analytics Software. Unified Analytics supports SSO for imported applications. For detailed instructions, see Importing Applications.

Managing Applications
HPE Ezmeral Unified Analytics Software provides the following options to manage the applications:
- Configure
- Delete
- Update for imported applications (Managing Imported Tools and Frameworks)
- Automatic and manual upgrade for included applications (Upgrading Included Frameworks)

For detailed instructions, see the following:
- Importing Applications: Describes how to import applications in HPE Ezmeral Unified Analytics Software.
- SSO Support for Imported Applications: Describes SSO support for imported applications integrated with native authentication and applications configured with authentication proxy.
- Managing Imported Tools and Frameworks: Describes how to configure, delete, and update imported tools and frameworks in HPE Ezmeral Unified Analytics Software.
- Configuring Included Applications: Describes how to configure tools and frameworks included with the HPE Ezmeral Unified Analytics Software installation.
- Upgrading Included Frameworks: Describes how to upgrade tools and frameworks included with the HPE Ezmeral Unified Analytics Software installation.
"url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/managing-application-lifecycle.html",
"title": "Importing Applications and Managing the Application Lifecycle"

Importing Applications
Describes how to import applications in HPE Ezmeral Unified Analytics Software.
Prerequisites
- Sign in to HPE Ezmeral Unified Analytics Software as Administrator.
- Configure an Istio VirtualService to expose the endpoint. VirtualService example:

apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: {{ include "test-app.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "test-app.labels" . | nindent 4 }}
spec:
  gateways:
  - {{ .Values.ezua.virtualService.istioGateway }}
  hosts:
  - {{ .Values.ezua.virtualService.endpoint }}
  # The following VirtualService options are specific and depend on the application implementation.
  # This example is a simple application with a single service and simple match routes.
  # The URL should point to the corresponding service.
  # Kubernetes provides an internal DNS mapping for services using the format <service>.<namespace>.svc.cluster.local.
  http:
  - match:
    - uri:
        prefix: /
    rewrite:
      uri: /
    route:
    - destination:
        host: {{ include "test-app.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
        port:
          number: {{ .Values.service.port }}

- Configure the values.yaml file of your application chart as follows:

ezua:
  ... # other EZUA options

  virtualService:
    endpoint: "http://test-app.hpe-staging-ezaf.com"
    istioGateway: "istio-system/ezaf-gateway"

- Configure SSO for the applications you want to import. See SSO Support for Imported Applications.
- All the applications must be deployed as Helm charts.
- You must have the tar.gz file created from the Helm chart for the application you want to import (one way to create it is sketched below).
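One way to produce that tar.gz from a chart directory; this is a generic sketch assuming the chart sources live in a local test-app/ directory, not a step mandated by the platform:

# helm package produces a versioned .tgz (the same gzipped-tar format)
helm package test-app/

# Or create a plain gzipped tarball of the chart directory yourself
tar -czf test-app.tar.gz test-app/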
NOTE If you are using a Bitnami Helm chart for your imported applications in HPE Ezmeral Unified Analytics Software, you must set volumePermissions to true in the values.yaml file: volumePermissions:\n enabled: true When a Bitnami application starts up, it creates a directory inside the container. Setting this value to true starts an init container that changes the owner of the PersistentVolume mount point. Setting this value to false leaves the permissions unchanged, which prevents the creation of the directory and causes the container to fail. Upload Helm Package tar.gz file: Click Select File and browse to the tar.gz of your application Helm chart. Namespace: Enter the namespace for the framework. Release Name: Enter the name for this specific installation of the Helm chart. Wait: To wait until all the necessary services, volumes, and pods are in the ready state before successfully importing the application, check Wait. Debug: To get detailed information on error status, check Debug. Framework Values: Configure the override values file of your application by using the Helm Values (YAML) box. Review: Review the framework details. Click the pencil icon in each section to navigate to the specific step and change the framework configuration. To import the framework, click Submit on the bottom right of the Review step. Results The application of your choice is imported and installed. You can view it on the Tools & Frameworks screen underneath your chosen application category. For example, if you imported the test-app application under the Data Engineering category, test-app appears on the Tools & Frameworks screen underneath the Data Engineering category.
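As a minimal, hypothetical sketch of the packaging prerequisite (the chart name test-app and all field values here are illustrative, not part of the product documentation): a chart directory needs at least a Chart.yaml such as apiVersion: v2\nname: test-app\ndescription: Example application imported into HPE Ezmeral Unified Analytics Software\ntype: application\nversion: 0.1.0\nappVersion: \"1.0.0\" together with its templates/ directory (containing, for example, the VirtualService template above). Archiving that directory as a tar.gz (for example with helm package) produces the file that the Framework Chart step expects.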
",
+ "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/importing-applications.html",
+ "title": "Importing Applications"
+ },
+ {
+ "content": "\nSSO Support for Imported Applications Describes SSO support for imported applications integrated with native authentication and applications configured with authentication proxy. Native Authentication Integrated Applications Add placeholders such as %%OIDC_ISSUER%% and %%LDAP_XXXX%% in the values.yaml file. HPE Ezmeral Unified Analytics Software automatically substitutes these placeholders with suitable values (a hypothetical sketch appears at the end of this topic). Authentication Proxy Configured Applications Configure SSO with an AuthorizationPolicy: configure the Istio security AuthorizationPolicy before importing the application. Example AuthorizationPolicy: apiVersion: security.istio.io/v1beta1\nkind: AuthorizationPolicy\nmetadata:\n name: {{ .Release.Name }}-auth-policy\n namespace: {{ .Values.ezua.authorizationPolicy.namespace }}\nspec:\n action: CUSTOM\n provider:\n name: {{ .Values.ezua.authorizationPolicy.providerName }}\n rules:\n - to:\n - operation:\n hosts:\n - {{ .Values.ezua.virtualService.endpoint }}\n selector:\n {{- with .Values.ezua.authorizationPolicy.matchLabels }}\n matchLabels:\n {{- toYaml . | nindent 6 }}\n {{- end }}
Configure the values.yaml file of your application chart as follows: ezua:\n oidc:\n client_id: \"${OIDC_CLIENT_ID}\"\n client_secret: \"${OIDC_CLIENT_SECRET}\"\n domain: \"${OIDC_DOMAIN}\"\n\n domainName: \"${DOMAIN_NAME}\"\n #Use the next options to configure the application endpoint.\n #An example of a VirtualService is shown in Importing Applications.\n virtualService:\n endpoint: \"test-app.${DOMAIN_NAME}\"\n istioGateway: \"istio-system/ezaf-gateway\"\n\n authorizationPolicy:\n namespace: \"istio-system\"\n providerName: \"oauth2-proxy\"\n matchLabels:\n istio: \"ingressgateway\"
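As a hypothetical sketch of the native-authentication placeholders (the key names oidc.issuer and ldap.host below are illustrative and depend entirely on the application chart; only the %%...%% placeholder tokens come from this documentation, and XXXX stands for a specific LDAP setting): oidc:\n issuer: \"%%OIDC_ISSUER%%\"\nldap:\n host: \"%%LDAP_XXXX%%\" At import time the platform substitutes each %%...%% token with the value matching the cluster configuration, so the chart needs no hard-coded identity-provider settings.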
",
+ "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/sso-support-for-imported-apps.html",
+ "title": "SSO Support for Imported Applications"
+ },
+ {
+ "content": "\nManaging Imported Tools and Frameworks Describes how to configure, delete, and update imported tools and frameworks in HPE Ezmeral Unified Analytics Software. Prerequisites An administrator must sign in to HPE Ezmeral Unified Analytics Software to manage applications. About this task You can configure, delete, or update imported applications and frameworks. Tiles for imported tools and frameworks display a yellow Imported label. Procedure In the left navigation bar, click Tools & Frameworks. Click the three-dots button on the tile of the application you want to manage. Perform one of the following tasks: Configure Select Configure. In the editor that opens, modify the application values.yaml file. Click Configure to apply the changes or Cancel to discard them. Delete To delete the application, select Delete. You can delete imported applications only; you cannot delete applications that were installed with HPE Ezmeral Unified Analytics Software. Update ATTENTION You cannot undo the update action. Select Update. The Update option is only available for imported applications. Browse to the location where the Helm chart is stored and select the Helm chart. Click Upload. Clicking Upload enables the Upgrade button in the application tile. To upgrade the application, click Upgrade. More information Configuring Included Applications Upgrading Included Frameworks",
+ "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/managing-applications.html",
+ "title": "Managing Imported Tools and Frameworks"
+ },
+ {
+ "content": "\nConfiguring Included Applications Describes how to configure tools and frameworks included with the HPE Ezmeral Unified Analytics Software installation. Prerequisites: Sign in to HPE Ezmeral Unified Analytics Software as an administrator.
To configure the tools and frameworks that were installed with HPE Ezmeral Unified Analytics Software, follow these steps: In the left navigation bar, click Tools & Frameworks. On the application tile, click the three-dots button. Select Configure to open the editor. In the editor, modify the values.yaml file (a purely illustrative override sketch follows these steps). To apply the changes, click Configure; to close the editor without applying changes, click Cancel.
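As a purely illustrative sketch of the kind of override you might make in this editor (the keys replicaCount and resources are common Helm chart conventions, not values defined by this documentation, and depend entirely on the chart in question): replicaCount: 2\nresources:\n limits:\n cpu: \"1\"\n memory: 2Gi Clicking Configure then applies the edited values.yaml to the installed release.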
",
+ "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/configuring-included-apps.html",
+ "title": "Configuring Included Applications"
+ },
+ {
+ "content": "\nUpgrading Included Frameworks Describes how to upgrade tools and frameworks included with the HPE Ezmeral Unified Analytics Software installation. Prerequisites: Sign in to HPE Ezmeral Unified Analytics Software as an administrator. You can upgrade frameworks installed with HPE Ezmeral Unified Analytics Software when a new version is available. You can upgrade the included frameworks in two ways: Automatic downloads of framework updates: If you are using a connected (non-air-gapped) environment, you can upgrade the included frameworks by enabling automatic downloads. To learn more, see Automatic Downloads of Framework Updates. Manual downloads of framework updates: If you are using a disconnected (air-gapped) environment, you must manually upgrade the included frameworks. You can also manually download framework updates in a connected environment. To learn more, see Manual Downloads of Framework Updates.",
+ "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/upgrading-included-apps.html",
+ "title": "Upgrading Included Frameworks"
+ },
+ {
+ "content": "\nAutomatic Downloads of Framework Updates Describes how to automatically upgrade tools and frameworks included with the HPE Ezmeral Unified Analytics Software installation.
Prerequisites: Sign in to HPE Ezmeral Unified Analytics Software as an administrator. When you enable automatic downloads of framework updates, the application tile on the Tools & Frameworks screen shows an Upgrade button indicating that a new Helm chart is available, or the Tools & Frameworks update bundle is displayed on the Available Updates table. To upgrade frameworks by enabling automatic downloads, follow these steps: In the left navigation bar, click Administration \u2192 Settings. Click Updates \u2192 Download Updates. To enable the automatic download of framework updates, toggle Enable automatic downloads of framework updates. NOTE To disable automatic downloads of framework updates, toggle Disable automatic downloads of framework updates. After enabling automatic downloads of framework updates, you have two choices: Batch framework updates: To upgrade frameworks simultaneously when new versions are available, use batch framework updates. To learn more, see Batch Framework Updates. Individual framework updates: To upgrade frameworks sequentially (one at a time) when new versions are available, use individual framework updates. To learn more, see Individual Framework Updates. Batch Framework Updates If frameworks have updates available for a new version, every hour HPE Ezmeral Unified Analytics Software bundles those updates and displays the update bundle on the Available Updates table as Tools & Frameworks. NOTE The Available Updates table is updated every hour, whereas for individual framework updates the Upgrade button is enabled immediately on the Tools & Frameworks screen when a new version of a framework is available. Once you see the Tools & Frameworks update bundle, you can click the bundle to view details. In the Details dialog box, you can see the name, description, the current version of the framework and chart, and the new available version of the framework and chart. Once you see the new available versions for the update, you can perform the following actions from the Actions menu. Update To batch update frameworks immediately, follow these steps: Click Update in the Actions menu. This opens an Update Now dialog box where you can compare the current and new available versions of frameworks for upgrade. Click Update Now to immediately start framework updates. Wait for framework updates to be in the In Progress status. NOTE You cannot cancel framework updates once they are in the In Progress status. Result: You can navigate to the Tools & Frameworks screen to see that frameworks are now in the Upgrading status. Schedule To schedule batch framework updates for later, follow these steps: Click Schedule in the Actions menu. This opens a Schedule Update dialog box where you can compare the current and new available versions of frameworks for upgrade. Select a date and time to schedule the update. Wait for framework updates to be in the Scheduled status. Once the framework updates are in the Scheduled status, you can perform the following actions from the Actions menu. Cancel You can cancel the scheduled updates any time before the update starts, that is, while updates are not yet in the In Progress status.
Reschedule You can reschedule the scheduled updates any time before the update starts, that is, while updates are not yet in the In Progress status. Update You can update frameworks immediately even though they have been scheduled for a later date and time. Viewing Update History Once your updates are complete, the Tools & Frameworks update details are displayed in the Update History table. You can click Tools & Frameworks to view details. In the Details dialog box, you can see the name, description, the current version of the framework and chart, and the new available version of the framework and chart. Individual Framework Updates To update frameworks one at a time, follow these steps: Click Tools & Frameworks in the left navigation bar. If a new version of a framework is available, the Upgrade button is enabled for that framework. An enabled Upgrade button only appears if the version of the framework currently installed is older than the version available. For example, if a new version of Airflow is available, you see the Upgrade button enabled in the application tile for Airflow. Click Upgrade to complete the upgrade for that framework. Repeat steps 1 and 2 to update all frameworks of your choice. Failure and Rollback When you are upgrading frameworks, if one of the framework updates fails, the application tile for that framework shows the Error status, and the failed application is rolled back to the version from which you were upgrading. For example, if you upgraded ten frameworks in HPE Ezmeral Unified Analytics Software and nine are upgraded and in the Ready status, but one framework upgrade failed and is in the Error status with a warning message on the framework tile, then only that failed application is rolled back to its previous version, while the nine other frameworks are successfully upgraded to the new versions. If for some reason the rollback fails and the framework is in the error state, contact HPE support to resolve the issue.",
+ "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/automatic-framework-updates.html",
+ "title": "Automatic Downloads of Framework Updates"
+ },
+ {
+ "content": "\nManual Downloads of Framework Updates Describes how to manually upgrade tools and frameworks included with the HPE Ezmeral Unified Analytics Software installation.
Prerequisites: Sign in to HPE Ezmeral Unified Analytics Software as an administrator. You can upgrade the applications by getting the upgrade bundle (Docker container image) from HPE Support and downloading it.
When you download the image, the Helm chart is pushed to the internal charts repository and the application tile on the Tools & Frameworks screen shows an Upgrade button indicating that a new Helm chart is available. To manually upgrade an included application, follow these steps: IMPORTANT In an air-gapped environment, download the upgrade bundle (Docker container image) by using the air-gap utility and push the container image to the air-gap registry before you complete the following steps. To learn more about using the air-gap utility, see Using the Air Gap Utility. In the left navigation bar, click Administration \u2192 Settings. Click the Updates tab. The UI submits a Kubernetes job that downloads the Helm charts and then uploads them to the local repository. Enter the Image Name for the application distributed by Hewlett Packard Enterprise. Click Download. View the downloaded image in the table. NOTE To remove an image, click Delete. After successfully downloading a new image, click Tools & Frameworks in the left navigation bar. View the application tile to verify that the tile displays an enabled Upgrade button. An enabled Upgrade button only appears if the version of the application currently installed is older than the version downloaded. For example, if you downloaded an image for a new version of Airflow, you see the Upgrade button enabled in the application tile for Airflow. To upgrade the application, click Upgrade.",
+ "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/manual-framework-updates.html",
+ "title": "Manual Downloads of Framework Updates"
+ },
+ {
+ "content": "\nConnecting to External S3 Object Stores Describes how to connect HPE Ezmeral Unified Analytics Software to external S3 object storage in AWS, MinIO, and HPE Ezmeral Data Fabric Object Store.
Administrators can connect HPE Ezmeral Unified Analytics Software to object storage in AWS S3, MinIO, and HPE Ezmeral Data Fabric Object Store. Users can then access data in the connected data sources through clients, such as Spark and Kubeflow notebooks, without providing an access or secret key. When you configure the data source connection, you provide HPE Ezmeral Unified Analytics Software with the access credentials (access key and secret key); the user does not need the access credentials because HPE Ezmeral Unified Analytics Software uses a proxy to communicate with clients. Clients talk to the HPE Ezmeral Unified Analytics Software proxy through the data source endpoint URL and pass JWT tokens to authenticate users. Users configure clients to talk to the connected object store. Users provide the client with the data source name and endpoint URL (as they appear on the data source tile in the HPE Ezmeral Unified Analytics Software UI), as well as the bucket they want the client to access. How to Connect HPE Ezmeral Unified Analytics to Object Storage Regardless of which object store you connect to, the general steps are the same with the exception of a few connection parameters. IMPORTANT You can create multiple object store connections. Each object store connection that you create must have a unique name. To connect to an object store: Sign in to HPE Ezmeral Unified Analytics Software. In the left navigation bar, select Data Engineering > Data Sources. On the Data Sources screen, select the Object Store Data tab. NOTE A local-s3 MinIO tile is displayed. This local version of MinIO is used internally by HPE Ezmeral Unified Analytics Software and cannot be deleted. Click Add New Data Source. Click the Add\u2026 button in one of the tiles ( HPE Ezmeral Data Fabric Object Store, Amazon, or MinIO).
In the drawer that opens, enter the connection properties: HPE Ezmeral Data Fabric Object Store To connect to HPE Ezmeral Data Fabric Object Store, provide the following information: Name - Enter a unique name for the data source. Endpoint - Enter the HPE Ezmeral Data Fabric Object Store URL, for example https://:9000 . Access Key - Enter the HPE Ezmeral Data Fabric Object Store access key. Secret Key - Enter the HPE Ezmeral Data Fabric Object Store secret key. Insecure - Only select this option for POCs or demos; do not select it for production environments. When the option is not selected, you must add the root CA certificate for a secured connection. AWS S3 To connect to AWS S3, provide the following information: Name - Enter a unique name for the data source. Endpoint - Enter the AWS S3 URL, for example https://s3.us-east-20.amazonaws.com . Access Key - Enter the AWS S3 access key. TIP The access key and secret key are associated with the IAM user in AWS. The IAM policy associated with the user should permit access to buckets, for example by granting the user read, write, and/or create access on buckets. Secret Key - Enter the AWS S3 secret key. AWS Region - Enter the AWS region. MinIO To connect to MinIO, provide the following information: Name - Enter a unique name for the data source. Endpoint - Enter the MinIO URL. Access Key - Enter the MinIO access key. Secret Key - Enter the MinIO secret key. Insecure - Only select this option for POCs or demos; do not select it for production environments. When the option is not selected, you must add the root CA certificate for a secured connection. Root Certificate - This is a TLS mode configuration. Add the root CA certificate bundle. Click Add. The data source is connected and a new tile for the data source displays on the Data Sources screen. IMPORTANT The data source name and endpoint URL display on the tile. Users need this information to connect their clients to the data source. Users can navigate to the Data Sources screen to get the information. See Accessing Data in External S3 Object Stores. Limitations Currently, object storage data source connections have the following limitations: You cannot edit connection properties, such as the access and secret keys, and you cannot delete an object store data source after you create the connection. Alternatively, you can create a new connection to the data source with a different name. Policy authorization is not supported. All HPE Ezmeral Unified Analytics Software users (members and admins) have full access to data in connected object stores, including read, write, and execute permissions.",
+ "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/connect-object-stores.html",
+ "title": "Connecting to External S3 Object Stores"
+ },
+ {
+ "content": "\nConnecting to External HPE Ezmeral Data Fabric Clusters Describes how to connect HPE Ezmeral Unified Analytics Software to an external HPE Ezmeral Data Fabric cluster.
To connect HPE Ezmeral Unified Analytics Software to HPE Ezmeral Data Fabric clusters, you must provide the following information about the HPE Ezmeral Data Fabric cluster: CLDB nodes (hostnames or IP addresses) Service ticket TIP To get a list of the CLDB hosts, run the following command on the HPE Ezmeral Data Fabric cluster: maprcli node listcldbs -cluster -json For information about service tickets, see Generating a Service Ticket. Complete the following steps to connect HPE Ezmeral Unified Analytics Software to an external HPE Ezmeral Data Fabric cluster: Sign in to HPE Ezmeral Unified Analytics Software. In the left navigation bar, select Administration > Data Fabrics. On the Data Fabrics page, click Add Data Fabric. In the drawer that opens, enter the following information: Name - Enter the HPE Ezmeral Data Fabric name.
IMPORTANT Each HPE Ezmeral Data Fabric connection that you create must have a unique name. CLDB Hosts - List one or more CLDB hostnames or IP addresses with the port number. If entering more than one CLDB host, use a comma to separate each hostname or IP address, for example: cldb.node.01:7222,cldb.node.02:7222,cldb.node.03:7222 Service Ticket - Paste the HPE Ezmeral Data Fabric service ticket into the field. The service ticket must include the HPE Ezmeral Data Fabric cluster name at the top. If the service ticket does not include the cluster name, the HPE Ezmeral Unified Analytics Software system cannot connect to the HPE Ezmeral Data Fabric cluster. Volume Path - Enter the path to the mounted volume in the HPE Ezmeral Data Fabric cluster. Click Add. The HPE Ezmeral Data Fabric cluster is listed on the Data Fabrics page. Status indicates the connection status. TIP When you connect HPE Ezmeral Unified Analytics Software to an external HPE Ezmeral Data Fabric cluster, it can take one to two minutes for the synchronization with the cluster to complete. Once synchronized, the Data Fabric connection Status column displays Ready (green light) and the Data Fabric name changes to a clickable hyperlink. You can browse the connected HPE Ezmeral Data Fabric volume from the HPE Ezmeral Unified Analytics Software UI. In the left navigation bar, go to Data Engineering > Data Sources. On the Data Sources page, click the Data Volumes tab.",
+ "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/connect-df-clusters.html",
+ "title": "Connecting to External HPE Ezmeral Data Fabric Clusters"
+ },
+ {
+ "content": "\nConfiguring Endpoints Describes the endpoints in HPE Ezmeral Unified Analytics Software and how to configure them.
Configure endpoints in HPE Ezmeral Unified Analytics Software by going to Administration > Settings and selecting the Configurations tab. The following sections provide details for each type of endpoint on the Configurations tab: OTel Endpoint The OTel endpoint is the target URL where the HPE Ezmeral Unified Analytics Software OTel exporter sends logs and metrics. The OTel endpoint enables other OTel collectors to receive cluster metrics and logs in OTel format. When you register an OTel endpoint, the cluster OTel collector exports metric and log data to the customer OTel collector hosted at the OTel endpoint (a minimal receiver sketch appears at the end of this topic). This includes Prometheus metrics about cluster performance, billing/metering-related data, and app-based metrics for Kubeflow, Spark, and Ray. Cluster logging data from Fluent Bit is also sent. You can also export the incoming data to tools such as Grafana or Elasticsearch. OTel is the standard format for metrics collection. Data only persists for 60 days in Prometheus. The OTel endpoint format: Must be a valid HTTPS host May contain a port Should contain a path Cannot contain other parts, such as a query string or fragment JDBC Endpoint The JDBC endpoint is automatically created when you install and configure HPE Ezmeral Unified Analytics Software. To connect EzPresto to external applications, see Connect to External Applications via JDBC. EzCentral Forwarding NOTE EzCentral Forwarding is not supported for air-gapped (disconnected) environments. In HPE Ezmeral Unified Analytics Software, you can opt in to forward your metrics collected by Prometheus to EzCentral via OTel in real time. EzCentral is a platform managed by HPE that can monitor your HPE Ezmeral Unified Analytics Software clusters when you enable EzCentral Forwarding. Forwarding metrics to EzCentral has the following benefits: Fast resolution of cluster issues by HPE through efficient cluster management and administration. Provides real-time alerts to HPE, enabling immediate incident resolution.
+  {
+    "content": "\nGPU Support Provides information about support for NVIDIA GPU, MIG partitioning, preparing hosts for a GPU-enabled environment, and adding hosts and enabling GPU in HPE Ezmeral Unified Analytics Software .
GPUs provide essential computational power and parallel processing capabilities to accelerate the training and inference of deep learning models, reading and processing data frames, processing SQL queries within Spark, and running experiments using Jupyter notebooks integrated with GPUs. The hundreds or thousands of smaller cores working in parallel enable GPUs to process massive amounts of data in a short period of time. HPE Ezmeral Unified Analytics Software supports single-access multi-instance GPU (MIG). You can use MIG when multiple applications require GPU acceleration. By using MIG, you can achieve higher resource utilization and cost efficiency. Supported GPU Models To see the GPU models supported by HPE Ezmeral Unified Analytics Software , see GPU Models . MIG Partitioning HPE Ezmeral Unified Analytics Software supports homogeneous configuration deployment, where the GPU is split into N equal parts with the same amount of memory and CUDA cores. All GPU models on the same Kubernetes cluster must operate in the same MIG mode or in the same configuration mode. HPE Ezmeral Unified Analytics Software does not support any mixed configuration across multiple GPU models. In HPE Ezmeral Unified Analytics Software , GPU partitions are presented as whole devices by using the MIG mechanism. When an application requests one GPU, the application receives a partition. Only one GPU device is visible to the application. To learn more, see CUDA visible devices . During the installation of HPE Ezmeral Unified Analytics Software , you must specify the GPU partition size (Whole, Large, Medium, or Small) and request the number of GPU instances required for the workload. You cannot change the GPU partition size later. For the A100 GPU, the partition sizes map to the following profiles (to learn about MIG profile names, see MIG Device Names ): Whole \u2013 1 vGPU per physical GPU (100%); MIG profile: none (the entire physical GPU, on both A100-40GB and A100-80GB). The A100 GPU is not split into any partitions; in this configuration, applications can use only one virtual GPU at a time. Large \u2013 2 vGPUs per physical GPU (42% each); MIG profiles: 3g.20gb (A100-40GB), 3g.40gb (A100-80GB). The A100 GPU is split into two equal partitions; 16% of the GPU remains idle. Medium \u2013 3 vGPUs per physical GPU (28% each); MIG profiles: 2g.10gb (A100-40GB), 2g.20gb (A100-80GB). The A100 GPU is split into three equal partitions; 16% of the GPU remains idle. Small \u2013 7 vGPUs per physical GPU (14% each); MIG profiles: 1g.5gb (A100-40GB), 1g.10gb (A100-80GB). The A100 GPU is split into seven equal partitions; 2% of the GPU remains idle. NOTE Once you select a specific partition size, you cannot change this configuration after installation.
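As an aside, on a GPU host with the NVIDIA driver installed, you can list the MIG GPU-instance profiles a device supports (such as the 3g.20gb or 1g.5gb profiles above) with nvidia-smi; this is a standard NVIDIA driver command, not a Unified Analytics-specific one.

    # List the MIG GPU instance profiles supported by the installed GPUs.
    nvidia-smi mig -lgip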
Preparing the GPU Environment HPE Ezmeral Unified Analytics Software supports GPUs on Kubernetes nodes. The underlying hosts must be running an operating system and version that is supported on the corresponding version of HPE Ezmeral Unified Analytics Software . HPE Ezmeral Unified Analytics Software supports user-provided deployment. If you want to use GPUs in HPE Ezmeral Unified Analytics Software , you must prepare your hosts before installation. The requirements for preparing user-provided hosts are: The host can be bare metal, a VM with GPU pass-through, or an AWS EC2 instance. Install the latest version of the supported operating system. To learn about the supported operating system versions for GPU in HPE Ezmeral Unified Analytics Software , see Operating System . NOTE Do not use operating systems with pre-installed NVIDIA drivers. HPE Ezmeral Unified Analytics Software does not support operating systems with pre-installed NVIDIA drivers. The GPU operator automatically installs NVIDIA drivers when the host is added to HPE Ezmeral Unified Analytics Software . Disable SELinux on the host before adding the host to HPE Ezmeral Unified Analytics Software . This is an NVIDIA limitation; see GPU Operator with RHEL8/SELinux . NOTE After successfully adding the host to the HPE Ezmeral Unified Analytics Software cluster and the successful NVIDIA driver installation through the GPU operator, you can enable SELinux on that host and set it to enforcing mode. To learn more about user-provided hosts, see Installing on User-Provided Hosts (Connected and Air-gapped Environments) . vSphere VM: Configure the VMs in the GPU pass-through setup by following the steps in the VMware setting up GPU pass-through documentation, then add the hosts to HPE Ezmeral Unified Analytics Software . AWS: Use an AWS account with access to provision GPU-based instances (p4d.24xlarge and p4de.24xlarge EC2 instances). Deploy the A100 EC2 instance (P4d instance) with an AMI image of a supported operating system, then add the hosts to HPE Ezmeral Unified Analytics Software . Adding Hosts and Enabling the GPU Environment After you have prepared hosts to work in the GPU-enabled environment, you must add them to HPE Ezmeral Unified Analytics Software during installation or during cluster expansion. After adding the host, the GPU is enabled automatically. Adding Hosts and Selecting the GPU Environment During Installation To add hosts and select the GPU environment during installation, follow these steps: Perform the installation instructions provided in the installation documentation for your deployment target until you reach the Installation Details step in the installation wizard. See Installation . In the Installation Details step, to enable the GPU, check Use GPU . vGPU: Specify the vGPU instances for your cluster. The number of vGPUs allocated depends on the GPU configuration partition size, the number of added GPU worker hosts, and the number of GPU cards per host. The number of allocated vGPUs may be less than the number of requested vGPUs. For example, if one A100 GPU host is added with two GPU cards and the following configuration: vGPU request: 10 vGPUs; vGPU configuration: Large. Then the number of allocated vGPUs is: 2 cards x 2 Large partitions per GPU card = 4. GPU Configuration: Specify the GPU partition size.
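The vGPU allocation above follows directly from hosts x cards x partitions per card. A minimal sketch of that arithmetic, using the numbers from the example (all values are illustrative):

    # Allocated vGPUs = GPU hosts x GPU cards per host x partitions per card.
    GPU_HOSTS=1            # one A100 host added
    CARDS_PER_HOST=2       # two GPU cards on that host
    PARTITIONS_PER_CARD=2  # "Large" size: 2 vGPUs per physical GPU
    echo $(( GPU_HOSTS * CARDS_PER_HOST * PARTITIONS_PER_CARD ))  # prints 4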
NOTE Once you select a specific partition size or specify the number of GPU instances, you cannot change this configuration after installation. As each node is added to the HPE Ezmeral Unified Analytics Software inventory node pool, HPE Ezmeral Unified Analytics Software configures the MIG profile if it detects MIG-capable devices (for example, A100). To specify the details for the other boxes or options in the Installation Details step and to complete the cluster installation, refer to the installation documentation for your deployment target. See Installation . Adding Hosts and Selecting the GPU Environment During Cluster Expansion To add hosts and select the GPU environment during cluster expansion, follow these steps: Perform the steps to expand the cluster until you reach the Expand Cluster screen. See Expanding the Cluster . To enable the GPU, in the Expand Cluster screen, check Use GPU . NOTE If you enabled the Use GPU option during the cluster installation, GPU is already enabled and you cannot disable the Use GPU option while expanding the cluster. vGPU: Specify the additional vGPU instances for your cluster. NOTE The number of additional vGPUs allocated depends on the GPU configuration partition size, the number of added GPU worker hosts, and the number of GPU cards per host. The number of allocated vGPUs may be less than the number of requested vGPUs. GPU Configuration: Specify the GPU partition size. NOTE If you selected the partition size during the cluster installation, you cannot update the partition size while expanding the cluster. To specify the details for the other boxes or options in the Expand Cluster screen and to complete the cluster expansion, see Expanding the Cluster . Viewing GPU Model Information To retrieve information about the GPU model installed in HPE Ezmeral Unified Analytics Software for your operating system, run: lspci | grep -i nvidia | awk -F': ' '{print $2}' To learn more about supported operating systems for GPUs in HPE Ezmeral Unified Analytics Software , see Operating System . Integrating GPU with Applications and Frameworks In HPE Ezmeral Unified Analytics Software , both imported and included applications and frameworks support GPU. With a MIG configuration, only one GPU is assigned per application. Applications request GPUs using the nvidia.com/gpu resource specifier (a request sketch follows the list below). NOTE HPE Ezmeral Unified Analytics Software does not support the MIG specifier nvidia.com/mig-Xg.YYgb . The following applications and frameworks support GPU in HPE Ezmeral Unified Analytics Software : Kubeflow Kale or KFP. See Enabling Kale Extension in Kubeflow Notebook . Kubeflow KServe. See Enabling GPU Support on Kubeflow Kserve Model Serving . Kubeflow Notebooks. See Creating GPU-Enabled Notebook Servers . Ray. See GPU Support for Ray . Spark. See Enabling GPU Support for Spark .",
+    "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/gpu-support.html",
+    "title": "GPU Support"
+  },
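The sketch below shows a pod requesting one GPU partition with the nvidia.com/gpu resource specifier described above. The pod name and container image are assumptions, not platform defaults.

    # Minimal sketch: request one vGPU (MIG partition) via nvidia.com/gpu.
    cat <<'EOF' | kubectl apply -f -
    apiVersion: v1
    kind: Pod
    metadata:
      name: gpu-smoke-test                           # hypothetical name
    spec:
      containers:
      - name: cuda
        image: nvidia/cuda:12.2.0-base-ubuntu22.04   # assumed image
        command: ["nvidia-smi"]                      # prints the single visible GPU device
        resources:
          limits:
            nvidia.com/gpu: 1
    EOF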
+  {
+    "content": "\nGPU Resource Management Describes the GPU idle reclaim policy used for GPU resource management.
GPU resource management enables you to optimize analytical workloads by distributing GPU resources across workloads so that each workload receives the necessary computing power. HPE Ezmeral Unified Analytics Software implements the GPU idle reclaim feature to maximize GPU utilization by dynamically allocating and deallocating resources to different frameworks and workloads as needed. This prevents overallocation and underutilization of GPU resources and increases efficiency. GPU resource management uses a priority policy to ensure that critical workloads get the resources they need while also allowing lower-priority workloads to utilize the GPU when it is available. When a workload or framework is finished using its GPU resources, HPE Ezmeral Unified Analytics Software initiates GPU resource reclamation. This involves deallocating the resources and making them available for other workloads. Custom Scheduler HPE Ezmeral Unified Analytics Software runs its own scheduler, which functions independently and is not connected to the default Kubernetes scheduler. Note that the default Kubernetes scheduler is still available alongside this custom scheduler. The custom scheduler is an enhanced version of the default Kubernetes scheduler that includes the GPU idle reclaim plugins and preemption tolerance. The custom scheduler plugin governs all GPU workloads and is installed in the hpe-plugin-scheduler namespace. This namespace consists of a controller and a scheduler module. The scheduler is responsible for scheduling and reclaiming. There are two pods in the scheduler namespace: scheduler-plugins-controller-dc8fbd68-2plns and scheduler-plugins-scheduler-5c9c5579cb-xz48q . You can view the logs of the scheduler-plugins-scheduler-5c9c5579cb-xz48q pod to find details of GPU reclamation and pod preemption. To see the logs, run: kubectl logs -f scheduler-plugins-scheduler-5c9c5579cb-xz48q -n hpe-plugin-scheduler Custom Scheduler Configurations HPE Ezmeral Unified Analytics Software sets the default configurations for the tools and frameworks supporting GPU workloads so that the custom scheduler is used by default. The following tools and frameworks support GPU workloads: Kubeflow Spark Livy Ray HPE MLDE Every GPU workload for Kubeflow, Spark, Livy, Ray, and HPE MLDE has the following configurations set as part of its pod spec to use the custom scheduler by default: schedulerName: scheduler-plugins-scheduler and priorityClass: <framework>-<component>-gpu . For example, for Kubeflow notebooks: kubeflow-notebook-gpu ; for Spark: spark-gpu (note: there is no component name for Spark). Only pods with spec.schedulerName set to scheduler-plugins-scheduler are considered for reclaiming. You must not modify these configurations for GPU reclamation. If your GPU pod spec is not set to scheduler-plugins-scheduler , the default Kubernetes scheduler will operate instead of the custom scheduler.
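A minimal sketch of the two pod-spec fields described above, applied to a hypothetical workload pod. In a standard Kubernetes pod spec the priority-class field is priorityClassName, and the class name here follows the <framework>-<component>-gpu pattern from the text; the pod name and image are assumptions.

    # Sketch: a GPU pod wired to the custom scheduler (pod and image are hypothetical).
    cat <<'EOF' | kubectl apply -f -
    apiVersion: v1
    kind: Pod
    metadata:
      name: gpu-workload-example
    spec:
      schedulerName: scheduler-plugins-scheduler    # required for reclaiming
      priorityClassName: kubeflow-notebook-gpu      # e.g., the Kubeflow notebook class
      containers:
      - name: main
        image: nvidia/cuda:12.2.0-base-ubuntu22.04  # assumed image
        command: ["sleep", "infinity"]
        resources:
          limits:
            nvidia.com/gpu: 1
    EOF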
The scheduler runs a cron job every 5-10 minutes that looks at the running pods and determines the feasibility of reclaiming pods based on their GPU usage and the annotation values set in the priority class attached to the pod. If a pod is eligible for preemption, the GPU is reclaimed and the pending pods are granted resources. Pods without any GPU usage (idle pods) grant their resources to the pending pods. NOTE Workloads with an idle GPU will not be preempted unless there is a pending request from another workload for GPU. GPU Configurations In HPE Ezmeral Unified Analytics Software , you can configure the priority level and idle time threshold from the GPU Control Panel screen. However, you cannot configure the toleration seconds and GPU usage threshold for workloads. To learn more about the GPU Control Panel, see Configuring GPU Idle Reclaim . Priority class and priority level HPE Ezmeral Unified Analytics Software attaches priority classes as pod specs to the deployed pods to prioritize pods. The priority class has a number called the priority level that determines the importance of a pod. The custom scheduler determines the priority based on this priority level. The default priority level for all pods is 8000. You can set the priority level from 8000-9999, where 8000 is the lowest priority level and 9999 is the highest priority level. You can update the priority level for your applications and workloads from the GPU Control Panel screen. Idle time threshold You can also set the idle time threshold for GPU from the GPU Control Panel screen. The idle time threshold for GPU is the maximum amount of time a GPU can remain idle without running any workloads. If a GPU remains idle for a duration exceeding this threshold, the GPU on those workloads can be reclaimed to make the GPU available for other workloads. Toleration seconds Toleration seconds is the minimum number of seconds the pod or workload needs to run before it can be preempted. The default toleration seconds is set to 300 seconds. GPU usage threshold The GPU usage threshold is the level of GPU utilization. The default usage threshold is set to 0.0. If any pod has a GPU usage of greater than 0 in the last 300 seconds, it cannot be preempted. For any pod to be preempted, its usage must be 0.0.",
+    "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/gpu-resource-management.html",
+    "title": "GPU Resource Management"
+  },
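To check which scheduler and priority class a running GPU pod actually carries, a standard kubectl query works; the pod name and namespace below are placeholders.

    kubectl get pod <pod-name> -n <namespace> \
        -o jsonpath='{.spec.schedulerName}{"\n"}{.spec.priorityClassName}{"\n"}'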
+  {
+    "content": "\nConfiguring GPU Idle Reclaim Describes how to configure GPU idle reclaim, view pod details, and view GPU usage. You can view frameworks, the number of vGPUs assigned, framework status, priority level, and the idle time threshold in the GPU Control Panel screen. You can also view the pod details and the GPU utilization chart. To navigate to the GPU Control Panel screen: Sign in to HPE Ezmeral Unified Analytics Software as an administrator. In the left navigation bar, click Administration \u2192 Resource Management . You are now in the GPU Control Panel screen.
In this screen, you can configure the policy settings and view the pod details and GPU usage as follows: Configuring the Policy Settings To set the policy settings (priority level and idle time threshold) for your framework and workload, click the Actions menu. In the Policy Settings screen, set the following: Priority Level Set the priority level in the range of 8000-9999, where 8000 is the lowest priority and 9999 is the highest priority. For example, a pod with the 8000 priority level has a low priority compared to a pod with the 9999 priority level. Default priority level: 8000 Idle Time Threshold Set the maximum amount of time a vGPU on a workload can be idle before that workload can be preempted (deallocated) automatically by a pending workload. Minimum idle time threshold: 60 seconds Default idle time threshold: 300 seconds The new policy settings are not applied to pods that are currently in the Running or Idle status; they are applied to new workloads. Viewing the Pod Details To view the pod details, click a framework that is in the Idle or Running status. This opens a pod detail screen. Here, you can see a list of pods, vGPUs assigned, status, age of pods, and the GPU utilization chart. Viewing the GPU Usage To view the GPU usage, click the GPU utilization chart icon under Actions . In the GPU utilization screen, you can view the GPU usage for the selected period.",
+    "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/configure-idle-reclaim.html",
+    "title": "Configuring GPU Idle Reclaim"
+  },
+  {
+    "content": "\nGPU Scheduling Workload Scenarios Describes GPU scheduling workload scenarios and the notebook example for GPU idle reclaim. In HPE Ezmeral Unified Analytics Software , you can encounter the following GPU scheduling workload scenarios during GPU idle reclamation. GPU Idle Reclaim In HPE Ezmeral Unified Analytics Software , consider two GPU workloads, denoted as Workload1 and Workload2 . Currently, Workload1 is running and is in an idle state while Workload2 is pending due to a lack of available GPU resources. In this scenario, if the idle duration of Workload1 exceeds the idle time threshold, Workload1 is preempted in favor of Workload2 . Following the preemption, Workload1 goes into a pending state, while Workload2 is allocated GPU resources and starts running. Active GPU Usage In HPE Ezmeral Unified Analytics Software , consider two GPU workloads, denoted as Workload1 and Workload2 . Currently, Workload1 is running and is using GPU resources while Workload2 is pending due to a lack of available GPU resources. The custom scheduler runs a cron job every 5-10 minutes to determine the eligibility of reclaiming pods based on their GPU usage and the annotation values set in the priority class attached to the pod. If the GPU usage for Workload1 is greater than 0.0, Workload1 cannot be preempted in favor of Workload2 . In this scenario, Workload1 continues to run and utilize the GPU resources without interruption. If the GPU usage for Workload1 is equal to 0.0 and the idle duration of Workload1 exceeds the idle time threshold, Workload1 is preempted in favor of Workload2 . Following the preemption, Workload1 goes into a pending state, while Workload2 is allocated GPU resources and starts running.
Priority Scheduling In HPE Ezmeral Unified Analytics Software , consider three GPU workloads, denoted as Workload1 , Workload2 , and Workload3 . Currently, Workload1 is running and is in an idle state, Workload2 is pending due to a lack of available GPU resources, and Workload3 has the highest priority among the three workloads and is pending due to a lack of available GPU resources. In this scenario, if the idle duration of Workload1 exceeds the idle time threshold, Workload1 is preempted in favor of Workload3 . Following the preemption, Workload1 goes into a pending state, Workload3 is allocated GPU resources and starts running, and Workload2 continues to be in the pending state. Notebook Example for GPU Idle Reclaim Consider a scenario in which HPE Ezmeral Unified Analytics Software is configured with a single physical GPU. In this scenario, you have chosen the Small vGPU size, which provides 7 vGPUs. Each application always has a maximum of one vGPU assigned to it. Now, assume you have seven notebook servers, denoted as idle-gpu-notebook , used-gpu-notebook-1 , used-gpu-notebook-2 , used-gpu-notebook-3 , used-gpu-notebook-4 , used-gpu-notebook-5 , and used-gpu-notebook-6 . In this scenario, the idle-gpu-notebook notebook server has an idle GPU with no GPU usage while the six other notebook servers are actively using GPU resources. You can navigate to the GPU Control Panel screen to check the status of these notebook servers. There you can see that one notebook server has an Idle status and the six others have a Running status. You can click Notebooks to view the details of each notebook server. You can confirm that the idle notebook has no GPU usage, and the six others have active GPU usage, by clicking the GPU utilization chart icon in the Actions menu. Now consider that you create another GPU-enabled notebook server, denoted as test-idle-notebook-2 . Because the GPU usage for idle-gpu-notebook is equal to 0.0, as soon as the idle duration of idle-gpu-notebook exceeds the idle time threshold, idle-gpu-notebook is preempted in favor of test-idle-notebook-2 . Following the preemption, idle-gpu-notebook goes into a pending state, while test-idle-notebook-2 is allocated GPU resources and starts running.",
+    "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/ManageClusters/gpu-scheduling-workload-examples.html",
+    "title": "GPU Scheduling Workload Scenarios"
+  },
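To observe these scenarios in practice, you can watch for workloads that are pending on GPU resources, for example a preempted workload re-entering the queue; this uses only standard kubectl field selectors.

    # Watch pods that are pending (e.g., preempted workloads waiting for a vGPU).
    kubectl get pods -A --field-selector=status.phase=Pending -w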
+  {
+    "content": "\nTroubleshooting Describes how to identify and debug issues in HPE Ezmeral Unified Analytics Software . To run kubectl commands and perform the admin-related tasks described in these topics, sign in to HPE Ezmeral Unified Analytics Software as an administrator.
The troubleshooting topics cover the following areas: Installation Describes how to identify and debug issues during installation. Host (Node) Management Describes how to identify and debug issues for hosts. Metering Describes how to identify and debug issues for metering. Monitoring Describes how to identify and debug issues for monitoring. Logging Describes how to identify and debug issues for logging. Airflow Describes how to identify and debug issues for Airflow. EzPresto Describes how to identify and debug issues for EzPresto . Superset Describes how to identify and debug issues for Superset. Spark Describes how to identify and debug issues for Spark. Importing Applications and Managing the Application Lifecycle Describes how to identify and debug issues while importing applications and managing the application lifecycle. Security Describes how to identify and debug issues related to security. GPU Describes how to identify and debug issues for GPU. User Interface Describes how to identify and debug issues related to the HPE Ezmeral Unified Analytics Software UI.",
+    "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Troubleshooting/troubleshooting.html",
+    "title": "Troubleshooting"
+  },
+  {
+    "content": "\nInstallation Describes how to identify and debug issues during installation.
Kyverno Service Issue The Kyverno installation step returns the following server error: (InternalError): error when creating \"/tmp/platform_ezua_app.yaml\": Internal error occurred: failed calling webhook \"mutate-policy.kyverno.svc\": failed to call webhook: Post \"https://kyverno-svc.kyverno.svc:443/policymutate?timeout=10s\": context deadline exceeded To resolve this issue, run the following script on all nodes and then install HPE Ezmeral Unified Analytics Software , as described in Installation Prerequisites and Installing on User-Provided Hosts (Connected and Air-gapped Environments) . #! /bin/bash\n\n# Script to disable ip checksum offload using ethtool for the primary nic. We will create a oneshot systemd service\n# to persist this across reboots\n\n# Setting ipaddress of the node\nHOST_IP=\"$(hostname -i)\"\n\necho \"fetching interface name for host: $HOST_IP\"\nPRIMARY_NIC=$(ip -o a show | grep ${HOST_IP} | awk '{print $2}')\n\necho \"printing current configuration for the nic\"\n\nethtool -k \"${PRIMARY_NIC}\" | grep tx-checksum-ip-generic\n\necho \"creating env and systemd unit file to turn chksum off for interface \\\"$PRIMARY_NIC\\\"\"\n\ncat > /etc/sysconfig/ezfab-chksum-off < /usr/lib/systemd/system/ezfab-chksum-off.service <:443",
+    "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Troubleshooting/ts-installation.html",
+    "title": "Installation"
+  },
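For reference, the core step of the script above, turning off generic TX checksum offload on the primary NIC, can be sketched as follows. This is a minimal sketch: the interface detection mirrors the script, and the systemd persistence wrapper (the env file and oneshot unit the script creates) is omitted here.

    # Sketch: disable generic TX checksum offload on the primary NIC (not persistent).
    HOST_IP="$(hostname -i)"
    PRIMARY_NIC=$(ip -o a show | grep "${HOST_IP}" | awk '{print $2}')
    ethtool -K "${PRIMARY_NIC}" tx-checksum-ip-generic off
    ethtool -k "${PRIMARY_NIC}" | grep tx-checksum-ip-generic   # should now report "off"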
+  {
+    "content": "\nHost (Node) Management Describes how to identify and debug issues for hosts.
Pods Stuck in Terminating State If you have not updated the SPIFFE CSI driver, as indicated in the Post Installation Steps , and you encounter pods stuck in the Terminating state after restarting, complete the following steps: Run the following command to update the SPIFFE CSI driver: kubectl -n spire set image ds spire-spiffe-csi-driver spiffe-csi-driver=ghcr.io/spiffe/spiffe-csi-driver:0.2.5 Remove the pods in the Terminating state. If these steps do not resolve the issue, contact HPE Support.",
+    "url": "https://docs.ezmeral.hpe.com/unified-analytics/13/Troubleshooting/ts-host-management.html",
+    "title": "Host (Node) Management"
+  },
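If a pod remains stuck after the image update, one common way to remove a pod in the Terminating state is a forced deletion; the names below are placeholders, and --force skips graceful shutdown, so use it with care.

    kubectl delete pod <pod-name> -n <namespace> --grace-period=0 --force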
+  {
+    "content": "\nMetering Describes how to identify and debug issues for metering. Resource Usage or Billing Metrics Sometimes the UI does not display or update resource usage or billing metrics. The monitoring and prometheus namespaces are used for observability in HPE Ezmeral Unified Analytics Software . Verify that the pods in these namespaces are running. To get the list of pods in the monitoring namespace, run: kubectl get pods -n monitoring Verify that the ua-application-metrics-generate-cronjob-28079520-6ts28 pod and the ua-monitor-deployment-c797c5f44 pod are running. If the AGE of ua-application-metrics-generate-cronjob-28079520-6ts28 is less than 60 minutes, the cron job is up to date. To see the logs for the cron jobs and to view all the aggregated values and output, run: kubectl logs ua-application-metrics-generate-cronjob-28079520-6ts28 -n monitoring The Uploaded records successfully at: